repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1
value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1
value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._get_bios_boot_resource | def _get_bios_boot_resource(self, data):
"""Get the Boot resource like BootSources.
:param data: Existing Bios settings of the server.
:returns: boot settings.
:raises: IloCommandNotSupportedError, if resource is not found.
:raises: IloError, on an error from iLO.
"""
try:
boot_uri = data['links']['Boot']['href']
except KeyError:
msg = ('Boot resource not found.')
raise exception.IloCommandNotSupportedError(msg)
status, headers, boot_settings = self._rest_get(boot_uri)
if status != 200:
msg = self._get_extended_error(boot_settings)
raise exception.IloError(msg)
return boot_settings | python | def _get_bios_boot_resource(self, data):
"""Get the Boot resource like BootSources.
:param data: Existing Bios settings of the server.
:returns: boot settings.
:raises: IloCommandNotSupportedError, if resource is not found.
:raises: IloError, on an error from iLO.
"""
try:
boot_uri = data['links']['Boot']['href']
except KeyError:
msg = ('Boot resource not found.')
raise exception.IloCommandNotSupportedError(msg)
status, headers, boot_settings = self._rest_get(boot_uri)
if status != 200:
msg = self._get_extended_error(boot_settings)
raise exception.IloError(msg)
return boot_settings | [
"def",
"_get_bios_boot_resource",
"(",
"self",
",",
"data",
")",
":",
"try",
":",
"boot_uri",
"=",
"data",
"[",
"'links'",
"]",
"[",
"'Boot'",
"]",
"[",
"'href'",
"]",
"except",
"KeyError",
":",
"msg",
"=",
"(",
"'Boot resource not found.'",
")",
"raise",
... | Get the Boot resource like BootSources.
:param data: Existing Bios settings of the server.
:returns: boot settings.
:raises: IloCommandNotSupportedError, if resource is not found.
:raises: IloError, on an error from iLO. | [
"Get",
"the",
"Boot",
"resource",
"like",
"BootSources",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L542-L562 | train | 41,600 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._get_bios_mappings_resource | def _get_bios_mappings_resource(self, data):
"""Get the Mappings resource.
:param data: Existing Bios settings of the server.
:returns: mappings settings.
:raises: IloCommandNotSupportedError, if resource is not found.
:raises: IloError, on an error from iLO.
"""
try:
map_uri = data['links']['Mappings']['href']
except KeyError:
msg = ('Mappings resource not found.')
raise exception.IloCommandNotSupportedError(msg)
status, headers, map_settings = self._rest_get(map_uri)
if status != 200:
msg = self._get_extended_error(map_settings)
raise exception.IloError(msg)
return map_settings | python | def _get_bios_mappings_resource(self, data):
"""Get the Mappings resource.
:param data: Existing Bios settings of the server.
:returns: mappings settings.
:raises: IloCommandNotSupportedError, if resource is not found.
:raises: IloError, on an error from iLO.
"""
try:
map_uri = data['links']['Mappings']['href']
except KeyError:
msg = ('Mappings resource not found.')
raise exception.IloCommandNotSupportedError(msg)
status, headers, map_settings = self._rest_get(map_uri)
if status != 200:
msg = self._get_extended_error(map_settings)
raise exception.IloError(msg)
return map_settings | [
"def",
"_get_bios_mappings_resource",
"(",
"self",
",",
"data",
")",
":",
"try",
":",
"map_uri",
"=",
"data",
"[",
"'links'",
"]",
"[",
"'Mappings'",
"]",
"[",
"'href'",
"]",
"except",
"KeyError",
":",
"msg",
"=",
"(",
"'Mappings resource not found.'",
")",
... | Get the Mappings resource.
:param data: Existing Bios settings of the server.
:returns: mappings settings.
:raises: IloCommandNotSupportedError, if resource is not found.
:raises: IloError, on an error from iLO. | [
"Get",
"the",
"Mappings",
"resource",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L564-L583 | train | 41,601 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._check_iscsi_rest_patch_allowed | def _check_iscsi_rest_patch_allowed(self):
"""Checks if patch is supported on iscsi.
:returns: iscsi url.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
headers, bios_uri, bios_settings = self._check_bios_resource()
# Check if the bios resource exists.
if('links' in bios_settings and 'iScsi' in bios_settings['links']):
iscsi_uri = bios_settings['links']['iScsi']['href']
status, headers, settings = self._rest_get(iscsi_uri)
if status != 200:
msg = self._get_extended_error(settings)
raise exception.IloError(msg)
if not self._operation_allowed(headers, 'PATCH'):
headers, iscsi_uri, settings = (
self._get_iscsi_settings_resource(settings))
self._validate_if_patch_supported(headers, iscsi_uri)
return iscsi_uri
else:
msg = ('"links/iScsi" section in bios'
' does not exist')
raise exception.IloCommandNotSupportedError(msg) | python | def _check_iscsi_rest_patch_allowed(self):
"""Checks if patch is supported on iscsi.
:returns: iscsi url.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
headers, bios_uri, bios_settings = self._check_bios_resource()
# Check if the bios resource exists.
if('links' in bios_settings and 'iScsi' in bios_settings['links']):
iscsi_uri = bios_settings['links']['iScsi']['href']
status, headers, settings = self._rest_get(iscsi_uri)
if status != 200:
msg = self._get_extended_error(settings)
raise exception.IloError(msg)
if not self._operation_allowed(headers, 'PATCH'):
headers, iscsi_uri, settings = (
self._get_iscsi_settings_resource(settings))
self._validate_if_patch_supported(headers, iscsi_uri)
return iscsi_uri
else:
msg = ('"links/iScsi" section in bios'
' does not exist')
raise exception.IloCommandNotSupportedError(msg) | [
"def",
"_check_iscsi_rest_patch_allowed",
"(",
"self",
")",
":",
"headers",
",",
"bios_uri",
",",
"bios_settings",
"=",
"self",
".",
"_check_bios_resource",
"(",
")",
"# Check if the bios resource exists.",
"if",
"(",
"'links'",
"in",
"bios_settings",
"and",
"'iScsi'"... | Checks if patch is supported on iscsi.
:returns: iscsi url.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | [
"Checks",
"if",
"patch",
"is",
"supported",
"on",
"iscsi",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L585-L615 | train | 41,602 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._change_iscsi_settings | def _change_iscsi_settings(self, iscsi_info):
"""Change iSCSI settings.
:param iscsi_info: A dictionary that contains information of iSCSI
target like target_name, lun, ip_address, port etc.
:raises: IloError, on an error from iLO.
"""
headers, bios_uri, bios_settings = self._check_bios_resource()
# Get the Mappings resource.
map_settings = self._get_bios_mappings_resource(bios_settings)
nics = []
for mapping in map_settings['BiosPciSettingsMappings']:
for subinstance in mapping['Subinstances']:
for association in subinstance['Associations']:
if 'NicBoot' in association:
nics.append(association)
if not nics:
msg = ('No nics found')
raise exception.IloError(msg)
iscsi_uri = self._check_iscsi_rest_patch_allowed()
# Set iSCSI info to all nics
iscsi_infos = []
for nic in nics:
data = iscsi_info.copy()
data['iSCSIBootAttemptName'] = nic
data['iSCSINicSource'] = nic
data['iSCSIBootAttemptInstance'] = nics.index(nic) + 1
iscsi_infos.append(data)
patch_data = {'iSCSIBootSources': iscsi_infos}
status, headers, response = self._rest_patch(iscsi_uri,
None, patch_data)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg) | python | def _change_iscsi_settings(self, iscsi_info):
"""Change iSCSI settings.
:param iscsi_info: A dictionary that contains information of iSCSI
target like target_name, lun, ip_address, port etc.
:raises: IloError, on an error from iLO.
"""
headers, bios_uri, bios_settings = self._check_bios_resource()
# Get the Mappings resource.
map_settings = self._get_bios_mappings_resource(bios_settings)
nics = []
for mapping in map_settings['BiosPciSettingsMappings']:
for subinstance in mapping['Subinstances']:
for association in subinstance['Associations']:
if 'NicBoot' in association:
nics.append(association)
if not nics:
msg = ('No nics found')
raise exception.IloError(msg)
iscsi_uri = self._check_iscsi_rest_patch_allowed()
# Set iSCSI info to all nics
iscsi_infos = []
for nic in nics:
data = iscsi_info.copy()
data['iSCSIBootAttemptName'] = nic
data['iSCSINicSource'] = nic
data['iSCSIBootAttemptInstance'] = nics.index(nic) + 1
iscsi_infos.append(data)
patch_data = {'iSCSIBootSources': iscsi_infos}
status, headers, response = self._rest_patch(iscsi_uri,
None, patch_data)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg) | [
"def",
"_change_iscsi_settings",
"(",
"self",
",",
"iscsi_info",
")",
":",
"headers",
",",
"bios_uri",
",",
"bios_settings",
"=",
"self",
".",
"_check_bios_resource",
"(",
")",
"# Get the Mappings resource.",
"map_settings",
"=",
"self",
".",
"_get_bios_mappings_resou... | Change iSCSI settings.
:param iscsi_info: A dictionary that contains information of iSCSI
target like target_name, lun, ip_address, port etc.
:raises: IloError, on an error from iLO. | [
"Change",
"iSCSI",
"settings",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L617-L653 | train | 41,603 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._change_secure_boot_settings | def _change_secure_boot_settings(self, property, value):
"""Change secure boot settings on the server."""
system = self._get_host_details()
# find the BIOS URI
if ('links' not in system['Oem']['Hp'] or
'SecureBoot' not in system['Oem']['Hp']['links']):
msg = (' "SecureBoot" resource or feature is not '
'supported on this system')
raise exception.IloCommandNotSupportedError(msg)
secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']
# Change the property required
new_secure_boot_settings = {}
new_secure_boot_settings[property] = value
# perform the patch
status, headers, response = self._rest_patch(
secure_boot_uri, None, new_secure_boot_settings)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg)
# Change the bios setting as a workaround to enable secure boot
# Can be removed when fixed for Gen9 snap2
val = self._get_bios_setting('CustomPostMessage')
val = val.rstrip() if val.endswith(" ") else val+" "
self._change_bios_setting({'CustomPostMessage': val}) | python | def _change_secure_boot_settings(self, property, value):
"""Change secure boot settings on the server."""
system = self._get_host_details()
# find the BIOS URI
if ('links' not in system['Oem']['Hp'] or
'SecureBoot' not in system['Oem']['Hp']['links']):
msg = (' "SecureBoot" resource or feature is not '
'supported on this system')
raise exception.IloCommandNotSupportedError(msg)
secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']
# Change the property required
new_secure_boot_settings = {}
new_secure_boot_settings[property] = value
# perform the patch
status, headers, response = self._rest_patch(
secure_boot_uri, None, new_secure_boot_settings)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg)
# Change the bios setting as a workaround to enable secure boot
# Can be removed when fixed for Gen9 snap2
val = self._get_bios_setting('CustomPostMessage')
val = val.rstrip() if val.endswith(" ") else val+" "
self._change_bios_setting({'CustomPostMessage': val}) | [
"def",
"_change_secure_boot_settings",
"(",
"self",
",",
"property",
",",
"value",
")",
":",
"system",
"=",
"self",
".",
"_get_host_details",
"(",
")",
"# find the BIOS URI",
"if",
"(",
"'links'",
"not",
"in",
"system",
"[",
"'Oem'",
"]",
"[",
"'Hp'",
"]",
... | Change secure boot settings on the server. | [
"Change",
"secure",
"boot",
"settings",
"on",
"the",
"server",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L655-L683 | train | 41,604 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations.clear_secure_boot_keys | def clear_secure_boot_keys(self):
"""Reset all keys.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
if self._is_boot_mode_uefi():
self._change_secure_boot_settings('ResetAllKeys', True)
else:
msg = ('System is not in UEFI boot mode. "SecureBoot" related '
'resources cannot be changed.')
raise exception.IloCommandNotSupportedInBiosError(msg) | python | def clear_secure_boot_keys(self):
"""Reset all keys.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
if self._is_boot_mode_uefi():
self._change_secure_boot_settings('ResetAllKeys', True)
else:
msg = ('System is not in UEFI boot mode. "SecureBoot" related '
'resources cannot be changed.')
raise exception.IloCommandNotSupportedInBiosError(msg) | [
"def",
"clear_secure_boot_keys",
"(",
"self",
")",
":",
"if",
"self",
".",
"_is_boot_mode_uefi",
"(",
")",
":",
"self",
".",
"_change_secure_boot_settings",
"(",
"'ResetAllKeys'",
",",
"True",
")",
"else",
":",
"msg",
"=",
"(",
"'System is not in UEFI boot mode. \... | Reset all keys.
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | [
"Reset",
"all",
"keys",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L766-L778 | train | 41,605 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._perform_power_op | def _perform_power_op(self, oper):
"""Perform requested power operation.
:param oper: Type of power button press to simulate.
Supported values: 'ON', 'ForceOff', 'ForceRestart' and
'Nmi'
:raises: IloError, on an error from iLO.
"""
power_settings = {"Action": "Reset",
"ResetType": oper}
systems_uri = "/rest/v1/Systems/1"
status, headers, response = self._rest_post(systems_uri, None,
power_settings)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg) | python | def _perform_power_op(self, oper):
"""Perform requested power operation.
:param oper: Type of power button press to simulate.
Supported values: 'ON', 'ForceOff', 'ForceRestart' and
'Nmi'
:raises: IloError, on an error from iLO.
"""
power_settings = {"Action": "Reset",
"ResetType": oper}
systems_uri = "/rest/v1/Systems/1"
status, headers, response = self._rest_post(systems_uri, None,
power_settings)
if status >= 300:
msg = self._get_extended_error(response)
raise exception.IloError(msg) | [
"def",
"_perform_power_op",
"(",
"self",
",",
"oper",
")",
":",
"power_settings",
"=",
"{",
"\"Action\"",
":",
"\"Reset\"",
",",
"\"ResetType\"",
":",
"oper",
"}",
"systems_uri",
"=",
"\"/rest/v1/Systems/1\"",
"status",
",",
"headers",
",",
"response",
"=",
"s... | Perform requested power operation.
:param oper: Type of power button press to simulate.
Supported values: 'ON', 'ForceOff', 'ForceRestart' and
'Nmi'
:raises: IloError, on an error from iLO. | [
"Perform",
"requested",
"power",
"operation",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L790-L807 | train | 41,606 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._retry_until_powered_on | def _retry_until_powered_on(self, power):
"""This method retries power on operation.
:param: power : target power state
"""
# If the system is in the same power state as
# requested by the user, it gives the error
# InvalidOperationForSystemState. To avoid this error
# the power state is checked before power on
# operation is performed.
status = self.get_host_power_status()
if (status != power):
self._perform_power_op(POWER_STATE[power])
return self.get_host_power_status()
else:
return status | python | def _retry_until_powered_on(self, power):
"""This method retries power on operation.
:param: power : target power state
"""
# If the system is in the same power state as
# requested by the user, it gives the error
# InvalidOperationForSystemState. To avoid this error
# the power state is checked before power on
# operation is performed.
status = self.get_host_power_status()
if (status != power):
self._perform_power_op(POWER_STATE[power])
return self.get_host_power_status()
else:
return status | [
"def",
"_retry_until_powered_on",
"(",
"self",
",",
"power",
")",
":",
"# If the system is in the same power state as",
"# requested by the user, it gives the error",
"# InvalidOperationForSystemState. To avoid this error",
"# the power state is checked before power on",
"# operation is perf... | This method retries power on operation.
:param: power : target power state | [
"This",
"method",
"retries",
"power",
"on",
"operation",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L855-L870 | train | 41,607 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations.get_http_boot_url | def get_http_boot_url(self):
"""Request the http boot url from system in uefi boot mode.
:returns: URL for http boot
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode.
"""
if(self._is_boot_mode_uefi() is True):
return self._get_bios_setting('UefiShellStartupUrl')
else:
msg = 'get_http_boot_url is not supported in the BIOS boot mode'
raise exception.IloCommandNotSupportedInBiosError(msg) | python | def get_http_boot_url(self):
"""Request the http boot url from system in uefi boot mode.
:returns: URL for http boot
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode.
"""
if(self._is_boot_mode_uefi() is True):
return self._get_bios_setting('UefiShellStartupUrl')
else:
msg = 'get_http_boot_url is not supported in the BIOS boot mode'
raise exception.IloCommandNotSupportedInBiosError(msg) | [
"def",
"get_http_boot_url",
"(",
"self",
")",
":",
"if",
"(",
"self",
".",
"_is_boot_mode_uefi",
"(",
")",
"is",
"True",
")",
":",
"return",
"self",
".",
"_get_bios_setting",
"(",
"'UefiShellStartupUrl'",
")",
"else",
":",
"msg",
"=",
"'get_http_boot_url is no... | Request the http boot url from system in uefi boot mode.
:returns: URL for http boot
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode. | [
"Request",
"the",
"http",
"boot",
"url",
"from",
"system",
"in",
"uefi",
"boot",
"mode",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L897-L909 | train | 41,608 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations.set_http_boot_url | def set_http_boot_url(self, url):
"""Set url to the UefiShellStartupUrl to the system in uefi boot mode.
:param url: URL for http boot
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode.
"""
if(self._is_boot_mode_uefi() is True):
self._change_bios_setting({'UefiShellStartupUrl': url})
else:
msg = 'set_http_boot_url is not supported in the BIOS boot mode'
raise exception.IloCommandNotSupportedInBiosError(msg) | python | def set_http_boot_url(self, url):
"""Set url to the UefiShellStartupUrl to the system in uefi boot mode.
:param url: URL for http boot
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode.
"""
if(self._is_boot_mode_uefi() is True):
self._change_bios_setting({'UefiShellStartupUrl': url})
else:
msg = 'set_http_boot_url is not supported in the BIOS boot mode'
raise exception.IloCommandNotSupportedInBiosError(msg) | [
"def",
"set_http_boot_url",
"(",
"self",
",",
"url",
")",
":",
"if",
"(",
"self",
".",
"_is_boot_mode_uefi",
"(",
")",
"is",
"True",
")",
":",
"self",
".",
"_change_bios_setting",
"(",
"{",
"'UefiShellStartupUrl'",
":",
"url",
"}",
")",
"else",
":",
"msg... | Set url to the UefiShellStartupUrl to the system in uefi boot mode.
:param url: URL for http boot
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedInBiosError, if the system is
in the bios boot mode. | [
"Set",
"url",
"to",
"the",
"UefiShellStartupUrl",
"to",
"the",
"system",
"in",
"uefi",
"boot",
"mode",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L911-L923 | train | 41,609 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._get_ilo_details | def _get_ilo_details(self):
"""Gets iLO details
:raises: IloError, on an error from iLO.
:raises: IloConnectionError, if iLO is not up after reset.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
manager_uri = '/rest/v1/Managers/1'
status, headers, manager = self._rest_get(manager_uri)
if status != 200:
msg = self._get_extended_error(manager)
raise exception.IloError(msg)
# verify expected type
mtype = self._get_type(manager)
if (mtype not in ['Manager.0', 'Manager.1']):
msg = "%s is not a valid Manager type " % mtype
raise exception.IloError(msg)
return manager, manager_uri | python | def _get_ilo_details(self):
"""Gets iLO details
:raises: IloError, on an error from iLO.
:raises: IloConnectionError, if iLO is not up after reset.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
manager_uri = '/rest/v1/Managers/1'
status, headers, manager = self._rest_get(manager_uri)
if status != 200:
msg = self._get_extended_error(manager)
raise exception.IloError(msg)
# verify expected type
mtype = self._get_type(manager)
if (mtype not in ['Manager.0', 'Manager.1']):
msg = "%s is not a valid Manager type " % mtype
raise exception.IloError(msg)
return manager, manager_uri | [
"def",
"_get_ilo_details",
"(",
"self",
")",
":",
"manager_uri",
"=",
"'/rest/v1/Managers/1'",
"status",
",",
"headers",
",",
"manager",
"=",
"self",
".",
"_rest_get",
"(",
"manager_uri",
")",
"if",
"status",
"!=",
"200",
":",
"msg",
"=",
"self",
".",
"_ge... | Gets iLO details
:raises: IloError, on an error from iLO.
:raises: IloConnectionError, if iLO is not up after reset.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | [
"Gets",
"iLO",
"details"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L1110-L1131 | train | 41,610 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations.reset_ilo | def reset_ilo(self):
"""Resets the iLO.
:raises: IloError, on an error from iLO.
:raises: IloConnectionError, if iLO is not up after reset.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
manager, reset_uri = self._get_ilo_details()
action = {'Action': 'Reset'}
# perform the POST
status, headers, response = self._rest_post(reset_uri, None, action)
if(status != 200):
msg = self._get_extended_error(response)
raise exception.IloError(msg)
# Check if the iLO is up again.
common.wait_for_ilo_after_reset(self) | python | def reset_ilo(self):
"""Resets the iLO.
:raises: IloError, on an error from iLO.
:raises: IloConnectionError, if iLO is not up after reset.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
manager, reset_uri = self._get_ilo_details()
action = {'Action': 'Reset'}
# perform the POST
status, headers, response = self._rest_post(reset_uri, None, action)
if(status != 200):
msg = self._get_extended_error(response)
raise exception.IloError(msg)
# Check if the iLO is up again.
common.wait_for_ilo_after_reset(self) | [
"def",
"reset_ilo",
"(",
"self",
")",
":",
"manager",
",",
"reset_uri",
"=",
"self",
".",
"_get_ilo_details",
"(",
")",
"action",
"=",
"{",
"'Action'",
":",
"'Reset'",
"}",
"# perform the POST",
"status",
",",
"headers",
",",
"response",
"=",
"self",
".",
... | Resets the iLO.
:raises: IloError, on an error from iLO.
:raises: IloConnectionError, if iLO is not up after reset.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | [
"Resets",
"the",
"iLO",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L1133-L1152 | train | 41,611 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._get_vm_device_status | def _get_vm_device_status(self, device='FLOPPY'):
"""Returns the given virtual media device status and device URI
:param device: virtual media device to be queried
:returns json format virtual media device status and its URI
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
valid_devices = {'FLOPPY': 'floppy',
'CDROM': 'cd'}
# Check if the input is valid
if device not in valid_devices:
raise exception.IloInvalidInputError(
"Invalid device. Valid devices: FLOPPY or CDROM.")
manager, uri = self._get_ilo_details()
try:
vmedia_uri = manager['links']['VirtualMedia']['href']
except KeyError:
msg = ('"VirtualMedia" section in Manager/links does not exist')
raise exception.IloCommandNotSupportedError(msg)
for status, hds, vmed, memberuri in self._get_collection(vmedia_uri):
status, headers, response = self._rest_get(memberuri)
if status != 200:
msg = self._get_extended_error(response)
raise exception.IloError(msg)
if (valid_devices[device] in
[item.lower() for item in response['MediaTypes']]):
vm_device_uri = response['links']['self']['href']
return response, vm_device_uri
# Requested device not found
msg = ('Virtualmedia device "' + device + '" is not'
' found on this system.')
raise exception.IloError(msg) | python | def _get_vm_device_status(self, device='FLOPPY'):
"""Returns the given virtual media device status and device URI
:param device: virtual media device to be queried
:returns json format virtual media device status and its URI
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
valid_devices = {'FLOPPY': 'floppy',
'CDROM': 'cd'}
# Check if the input is valid
if device not in valid_devices:
raise exception.IloInvalidInputError(
"Invalid device. Valid devices: FLOPPY or CDROM.")
manager, uri = self._get_ilo_details()
try:
vmedia_uri = manager['links']['VirtualMedia']['href']
except KeyError:
msg = ('"VirtualMedia" section in Manager/links does not exist')
raise exception.IloCommandNotSupportedError(msg)
for status, hds, vmed, memberuri in self._get_collection(vmedia_uri):
status, headers, response = self._rest_get(memberuri)
if status != 200:
msg = self._get_extended_error(response)
raise exception.IloError(msg)
if (valid_devices[device] in
[item.lower() for item in response['MediaTypes']]):
vm_device_uri = response['links']['self']['href']
return response, vm_device_uri
# Requested device not found
msg = ('Virtualmedia device "' + device + '" is not'
' found on this system.')
raise exception.IloError(msg) | [
"def",
"_get_vm_device_status",
"(",
"self",
",",
"device",
"=",
"'FLOPPY'",
")",
":",
"valid_devices",
"=",
"{",
"'FLOPPY'",
":",
"'floppy'",
",",
"'CDROM'",
":",
"'cd'",
"}",
"# Check if the input is valid",
"if",
"device",
"not",
"in",
"valid_devices",
":",
... | Returns the given virtual media device status and device URI
:param device: virtual media device to be queried
:returns json format virtual media device status and its URI
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | [
"Returns",
"the",
"given",
"virtual",
"media",
"device",
"status",
"and",
"device",
"URI"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L1306-L1344 | train | 41,612 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._get_persistent_boot_devices | def _get_persistent_boot_devices(self):
"""Get details of persistent boot devices, its order
:returns: List of dictionary of boot sources and
list of boot device order
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
# Check if the BIOS resource if exists.
headers_bios, bios_uri, bios_settings = self._check_bios_resource()
# Get the Boot resource.
boot_settings = self._get_bios_boot_resource(bios_settings)
# Get the BootSources resource
try:
boot_sources = boot_settings['BootSources']
except KeyError:
msg = ("BootSources resource not found.")
raise exception.IloError(msg)
try:
boot_order = boot_settings['PersistentBootConfigOrder']
except KeyError:
msg = ("PersistentBootConfigOrder resource not found.")
raise exception.IloCommandNotSupportedError(msg)
return boot_sources, boot_order | python | def _get_persistent_boot_devices(self):
"""Get details of persistent boot devices, its order
:returns: List of dictionary of boot sources and
list of boot device order
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server.
"""
# Check if the BIOS resource if exists.
headers_bios, bios_uri, bios_settings = self._check_bios_resource()
# Get the Boot resource.
boot_settings = self._get_bios_boot_resource(bios_settings)
# Get the BootSources resource
try:
boot_sources = boot_settings['BootSources']
except KeyError:
msg = ("BootSources resource not found.")
raise exception.IloError(msg)
try:
boot_order = boot_settings['PersistentBootConfigOrder']
except KeyError:
msg = ("PersistentBootConfigOrder resource not found.")
raise exception.IloCommandNotSupportedError(msg)
return boot_sources, boot_order | [
"def",
"_get_persistent_boot_devices",
"(",
"self",
")",
":",
"# Check if the BIOS resource if exists.",
"headers_bios",
",",
"bios_uri",
",",
"bios_settings",
"=",
"self",
".",
"_check_bios_resource",
"(",
")",
"# Get the Boot resource.",
"boot_settings",
"=",
"self",
".... | Get details of persistent boot devices, its order
:returns: List of dictionary of boot sources and
list of boot device order
:raises: IloError, on an error from iLO.
:raises: IloCommandNotSupportedError, if the command is not supported
on the server. | [
"Get",
"details",
"of",
"persistent",
"boot",
"devices",
"its",
"order"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L1497-L1525 | train | 41,613 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._get_firmware_update_service_resource | def _get_firmware_update_service_resource(self):
"""Gets the firmware update service uri.
:returns: firmware update service uri
:raises: IloError, on an error from iLO.
:raises: IloConnectionError, if not able to reach iLO.
:raises: IloCommandNotSupportedError, for not finding the uri
"""
manager, uri = self._get_ilo_details()
try:
fw_uri = manager['Oem']['Hp']['links']['UpdateService']['href']
except KeyError:
msg = ("Firmware Update Service resource not found.")
raise exception.IloCommandNotSupportedError(msg)
return fw_uri | python | def _get_firmware_update_service_resource(self):
"""Gets the firmware update service uri.
:returns: firmware update service uri
:raises: IloError, on an error from iLO.
:raises: IloConnectionError, if not able to reach iLO.
:raises: IloCommandNotSupportedError, for not finding the uri
"""
manager, uri = self._get_ilo_details()
try:
fw_uri = manager['Oem']['Hp']['links']['UpdateService']['href']
except KeyError:
msg = ("Firmware Update Service resource not found.")
raise exception.IloCommandNotSupportedError(msg)
return fw_uri | [
"def",
"_get_firmware_update_service_resource",
"(",
"self",
")",
":",
"manager",
",",
"uri",
"=",
"self",
".",
"_get_ilo_details",
"(",
")",
"try",
":",
"fw_uri",
"=",
"manager",
"[",
"'Oem'",
"]",
"[",
"'Hp'",
"]",
"[",
"'links'",
"]",
"[",
"'UpdateServi... | Gets the firmware update service uri.
:returns: firmware update service uri
:raises: IloError, on an error from iLO.
:raises: IloConnectionError, if not able to reach iLO.
:raises: IloCommandNotSupportedError, for not finding the uri | [
"Gets",
"the",
"firmware",
"update",
"service",
"uri",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L1686-L1700 | train | 41,614 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._get_tpm_capability | def _get_tpm_capability(self):
"""Retrieves if server is TPM capable or not.
:returns True if TPM is Present else False
"""
tpm_values = {"NotPresent": False,
"PresentDisabled": True,
"PresentEnabled": True}
try:
tpm_state = self._get_bios_setting('TpmState')
except exception.IloCommandNotSupportedError:
tpm_state = "NotPresent"
tpm_result = tpm_values[tpm_state]
return tpm_result | python | def _get_tpm_capability(self):
"""Retrieves if server is TPM capable or not.
:returns True if TPM is Present else False
"""
tpm_values = {"NotPresent": False,
"PresentDisabled": True,
"PresentEnabled": True}
try:
tpm_state = self._get_bios_setting('TpmState')
except exception.IloCommandNotSupportedError:
tpm_state = "NotPresent"
tpm_result = tpm_values[tpm_state]
return tpm_result | [
"def",
"_get_tpm_capability",
"(",
"self",
")",
":",
"tpm_values",
"=",
"{",
"\"NotPresent\"",
":",
"False",
",",
"\"PresentDisabled\"",
":",
"True",
",",
"\"PresentEnabled\"",
":",
"True",
"}",
"try",
":",
"tpm_state",
"=",
"self",
".",
"_get_bios_setting",
"... | Retrieves if server is TPM capable or not.
:returns True if TPM is Present else False | [
"Retrieves",
"if",
"server",
"is",
"TPM",
"capable",
"or",
"not",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L1785-L1798 | train | 41,615 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._get_cpu_virtualization | def _get_cpu_virtualization(self):
"""get cpu virtualization status."""
try:
cpu_vt = self._get_bios_setting('ProcVirtualization')
except exception.IloCommandNotSupportedError:
return False
if cpu_vt == 'Enabled':
vt_status = True
else:
vt_status = False
return vt_status | python | def _get_cpu_virtualization(self):
"""get cpu virtualization status."""
try:
cpu_vt = self._get_bios_setting('ProcVirtualization')
except exception.IloCommandNotSupportedError:
return False
if cpu_vt == 'Enabled':
vt_status = True
else:
vt_status = False
return vt_status | [
"def",
"_get_cpu_virtualization",
"(",
"self",
")",
":",
"try",
":",
"cpu_vt",
"=",
"self",
".",
"_get_bios_setting",
"(",
"'ProcVirtualization'",
")",
"except",
"exception",
".",
"IloCommandNotSupportedError",
":",
"return",
"False",
"if",
"cpu_vt",
"==",
"'Enabl... | get cpu virtualization status. | [
"get",
"cpu",
"virtualization",
"status",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L1800-L1810 | train | 41,616 |
openstack/proliantutils | proliantutils/ilo/ris.py | RISOperations._get_nvdimm_n_status | def _get_nvdimm_n_status(self):
"""Get status of NVDIMM_N.
:returns: True if NVDIMM_N is present and enabled, False otherwise.
"""
try:
nvdimm_n_status = self._get_bios_setting('NvDimmNMemFunctionality')
if nvdimm_n_status == 'Enabled':
nvn_status = True
else:
nvn_status = False
except exception.IloCommandNotSupportedError:
nvn_status = False
return nvn_status | python | def _get_nvdimm_n_status(self):
"""Get status of NVDIMM_N.
:returns: True if NVDIMM_N is present and enabled, False otherwise.
"""
try:
nvdimm_n_status = self._get_bios_setting('NvDimmNMemFunctionality')
if nvdimm_n_status == 'Enabled':
nvn_status = True
else:
nvn_status = False
except exception.IloCommandNotSupportedError:
nvn_status = False
return nvn_status | [
"def",
"_get_nvdimm_n_status",
"(",
"self",
")",
":",
"try",
":",
"nvdimm_n_status",
"=",
"self",
".",
"_get_bios_setting",
"(",
"'NvDimmNMemFunctionality'",
")",
"if",
"nvdimm_n_status",
"==",
"'Enabled'",
":",
"nvn_status",
"=",
"True",
"else",
":",
"nvn_status"... | Get status of NVDIMM_N.
:returns: True if NVDIMM_N is present and enabled, False otherwise. | [
"Get",
"status",
"of",
"NVDIMM_N",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ris.py#L1812-L1825 | train | 41,617 |
NuGrid/NuGridPy | nugridpy/nugridse.py | _obsolete_plot_el_abund_marco | def _obsolete_plot_el_abund_marco(directory,name_h5_file,mass_range,cycle,logic_stable,i_decay,file_solar,solar_factor,symbol='ko'):
"""
Interface to plot elements abundances averaged over mass_range.
Parameters
----------
directory : string
Location of h5 file to plot. Needed for plot_tools.
name_h5_file : string
Name of h5 file. Needed for plot_tools.
mass_range : list
A 1x2 array required to plot data in a certain mass range. Needed for
_read_iso_abund_marco.
cycle : integer
which cycle from the h5 file?. Needed for _read_iso_abund_marco.
logic_stable : boolean
Do you want to plot only stable or not.
i_decay : integer
If i_decay is 1, then plot not decayed. If i_decay is 2, then
plot decayed. Make sense only if stable is true.
file_solar : string
File where to take solar abundances.
solar_factor : float
value to correct initial abundances to solar, e.g. for Z=0.01
and AG89 solar_factor = 2.
See Also
--------
se._read_iso_abund_marco()
"""
# provide library for Z versus element names, and Z for elements
u.give_zip_element_z_and_names()
# solar abundances are read here
u.solar(file_solar,solar_factor)
# from here I have average abundances in mass_range to plot
average_iso_abund_marco(mass_range,cycle,logic_stable,i_decay)
# element abundances are calculated here
mass_fractions_array_decayed = average_mass_frac_decay
mass_fractions_array_not_decayed = average_mass_frac
u.element_abund_marco(i_decay,stable,jjdum,mass_fractions_array_not_decayed,mass_fractions_array_decayed)
fig = pl.figure() # Figure object
ax = fig.add_subplot(1,1,1) # Axes object: one row, one column, first plot (one plot!)
# Tick marks
xminorlocator = MultipleLocator(1)
xmajorlocator = MultipleLocator(10)
ax.xaxis.set_major_locator(xmajorlocator)
ax.xaxis.set_minor_locator(xminorlocator)
yminorlocator = MultipleLocator(0.1)
ymajorlocator = MultipleLocator(1)
ax.yaxis.set_major_locator(ymajorlocator)
ax.yaxis.set_minor_locator(yminorlocator)
ax.set_yscale('log')
if not logic_stable:
for i in range(u.z_bismuth):
pl.plot(z_for_elem[i],elem_prod_fac[i],symbol,markersize=10.)
pl.xlabel('$Atomic$ $number$', fontsize=20)
pl.ylabel('$X_{i}/X_{sun}$', fontsize=20)
pl.ylim(1.0e-2,1000.)
pl.xlim(0,95)
elif logic_stable:
for i in range(u.z_bismuth):
if index_stable[i] == 1:
continue
#pl.plot(z_for_elem[i],elem_prod_fac[i],'ko')
if i_decay == 2:
for i in range(u.z_bismuth):
if index_stable[i] == 1:
pl.plot(z_for_elem[i],elem_prod_fac_decayed[i],symbol,markersize=10.)
pl.xlabel('$Atomic$ $number$', fontsize=20)
pl.ylabel('$X_{i}/X_{sun}$', fontsize=20)
pl.ylim(1.0e-2,1000.)
pl.xlim(0,95)
pl.grid()
pl.show() | python | def _obsolete_plot_el_abund_marco(directory,name_h5_file,mass_range,cycle,logic_stable,i_decay,file_solar,solar_factor,symbol='ko'):
"""
Interface to plot elements abundances averaged over mass_range.
Parameters
----------
directory : string
Location of h5 file to plot. Needed for plot_tools.
name_h5_file : string
Name of h5 file. Needed for plot_tools.
mass_range : list
A 1x2 array required to plot data in a certain mass range. Needed for
_read_iso_abund_marco.
cycle : integer
which cycle from the h5 file?. Needed for _read_iso_abund_marco.
logic_stable : boolean
Do you want to plot only stable or not.
i_decay : integer
If i_decay is 1, then plot not decayed. If i_decay is 2, then
plot decayed. Make sense only if stable is true.
file_solar : string
File where to take solar abundances.
solar_factor : float
value to correct initial abundances to solar, e.g. for Z=0.01
and AG89 solar_factor = 2.
See Also
--------
se._read_iso_abund_marco()
"""
# provide library for Z versus element names, and Z for elements
u.give_zip_element_z_and_names()
# solar abundances are read here
u.solar(file_solar,solar_factor)
# from here I have average abundances in mass_range to plot
average_iso_abund_marco(mass_range,cycle,logic_stable,i_decay)
# element abundances are calculated here
mass_fractions_array_decayed = average_mass_frac_decay
mass_fractions_array_not_decayed = average_mass_frac
u.element_abund_marco(i_decay,stable,jjdum,mass_fractions_array_not_decayed,mass_fractions_array_decayed)
fig = pl.figure() # Figure object
ax = fig.add_subplot(1,1,1) # Axes object: one row, one column, first plot (one plot!)
# Tick marks
xminorlocator = MultipleLocator(1)
xmajorlocator = MultipleLocator(10)
ax.xaxis.set_major_locator(xmajorlocator)
ax.xaxis.set_minor_locator(xminorlocator)
yminorlocator = MultipleLocator(0.1)
ymajorlocator = MultipleLocator(1)
ax.yaxis.set_major_locator(ymajorlocator)
ax.yaxis.set_minor_locator(yminorlocator)
ax.set_yscale('log')
if not logic_stable:
for i in range(u.z_bismuth):
pl.plot(z_for_elem[i],elem_prod_fac[i],symbol,markersize=10.)
pl.xlabel('$Atomic$ $number$', fontsize=20)
pl.ylabel('$X_{i}/X_{sun}$', fontsize=20)
pl.ylim(1.0e-2,1000.)
pl.xlim(0,95)
elif logic_stable:
for i in range(u.z_bismuth):
if index_stable[i] == 1:
continue
#pl.plot(z_for_elem[i],elem_prod_fac[i],'ko')
if i_decay == 2:
for i in range(u.z_bismuth):
if index_stable[i] == 1:
pl.plot(z_for_elem[i],elem_prod_fac_decayed[i],symbol,markersize=10.)
pl.xlabel('$Atomic$ $number$', fontsize=20)
pl.ylabel('$X_{i}/X_{sun}$', fontsize=20)
pl.ylim(1.0e-2,1000.)
pl.xlim(0,95)
pl.grid()
pl.show() | [
"def",
"_obsolete_plot_el_abund_marco",
"(",
"directory",
",",
"name_h5_file",
",",
"mass_range",
",",
"cycle",
",",
"logic_stable",
",",
"i_decay",
",",
"file_solar",
",",
"solar_factor",
",",
"symbol",
"=",
"'ko'",
")",
":",
"# provide library for Z versus element n... | Interface to plot elements abundances averaged over mass_range.
Parameters
----------
directory : string
Location of h5 file to plot. Needed for plot_tools.
name_h5_file : string
Name of h5 file. Needed for plot_tools.
mass_range : list
A 1x2 array required to plot data in a certain mass range. Needed for
_read_iso_abund_marco.
cycle : integer
which cycle from the h5 file?. Needed for _read_iso_abund_marco.
logic_stable : boolean
Do you want to plot only stable or not.
i_decay : integer
If i_decay is 1, then plot not decayed. If i_decay is 2, then
plot decayed. Make sense only if stable is true.
file_solar : string
File where to take solar abundances.
solar_factor : float
value to correct initial abundances to solar, e.g. for Z=0.01
and AG89 solar_factor = 2.
See Also
--------
se._read_iso_abund_marco() | [
"Interface",
"to",
"plot",
"elements",
"abundances",
"averaged",
"over",
"mass_range",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L4075-L4162 | train | 41,618 |
NuGrid/NuGridPy | nugridpy/nugridse.py | se.get | def get(self, cycle_list, dataitem=None, isotope=None, sparse=1):
"""
Simple function that simply calls h5T.py get method. There
are three ways to call this function.
Parameters
----------
cycle_list : string, list
If cycle_list is a string, then get interpates the argument
cycle_list as a dataitem and fetches the dataitem for all
cycles.
If cycle_list is a list, then get fetches the dataitem for
the cycles in the list.
dataitem : string, optional
fetches the dataitem from the list of cycles. If dataitem
is None, then cycle_list must be a string and will be used
as dataitem. If dataitem is an isotope in the form 'H-2',
it then returns the result of,
>>> self.get(cycle_list,'iso_massf',dataitem)
The default is None.
isotope : string, optional
The name of the isotope to fetch, it must be in the form
'H-2'. If isotope is None, then cycle_list or dataitem
must be a string. The default is None.
sparse : integer, optional
Implements a sparsity factor on the fetched data. The
default is 1.
Notes
-----
Calling the get method directly in the form,
>>> self.get(cycle_list,'iso_massf',dataitem)
is depricated, and only included for compatibility.
"""
return self.se.get(cycle_list,dataitem,isotope,sparse) | python | def get(self, cycle_list, dataitem=None, isotope=None, sparse=1):
"""
Simple function that simply calls h5T.py get method. There
are three ways to call this function.
Parameters
----------
cycle_list : string, list
If cycle_list is a string, then get interpates the argument
cycle_list as a dataitem and fetches the dataitem for all
cycles.
If cycle_list is a list, then get fetches the dataitem for
the cycles in the list.
dataitem : string, optional
fetches the dataitem from the list of cycles. If dataitem
is None, then cycle_list must be a string and will be used
as dataitem. If dataitem is an isotope in the form 'H-2',
it then returns the result of,
>>> self.get(cycle_list,'iso_massf',dataitem)
The default is None.
isotope : string, optional
The name of the isotope to fetch, it must be in the form
'H-2'. If isotope is None, then cycle_list or dataitem
must be a string. The default is None.
sparse : integer, optional
Implements a sparsity factor on the fetched data. The
default is 1.
Notes
-----
Calling the get method directly in the form,
>>> self.get(cycle_list,'iso_massf',dataitem)
is depricated, and only included for compatibility.
"""
return self.se.get(cycle_list,dataitem,isotope,sparse) | [
"def",
"get",
"(",
"self",
",",
"cycle_list",
",",
"dataitem",
"=",
"None",
",",
"isotope",
"=",
"None",
",",
"sparse",
"=",
"1",
")",
":",
"return",
"self",
".",
"se",
".",
"get",
"(",
"cycle_list",
",",
"dataitem",
",",
"isotope",
",",
"sparse",
... | Simple function that simply calls h5T.py get method. There
are three ways to call this function.
Parameters
----------
cycle_list : string, list
If cycle_list is a string, then get interpates the argument
cycle_list as a dataitem and fetches the dataitem for all
cycles.
If cycle_list is a list, then get fetches the dataitem for
the cycles in the list.
dataitem : string, optional
fetches the dataitem from the list of cycles. If dataitem
is None, then cycle_list must be a string and will be used
as dataitem. If dataitem is an isotope in the form 'H-2',
it then returns the result of,
>>> self.get(cycle_list,'iso_massf',dataitem)
The default is None.
isotope : string, optional
The name of the isotope to fetch, it must be in the form
'H-2'. If isotope is None, then cycle_list or dataitem
must be a string. The default is None.
sparse : integer, optional
Implements a sparsity factor on the fetched data. The
default is 1.
Notes
-----
Calling the get method directly in the form,
>>> self.get(cycle_list,'iso_massf',dataitem)
is depricated, and only included for compatibility. | [
"Simple",
"function",
"that",
"simply",
"calls",
"h5T",
".",
"py",
"get",
"method",
".",
"There",
"are",
"three",
"ways",
"to",
"call",
"this",
"function",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L327-L367 | train | 41,619 |
NuGrid/NuGridPy | nugridpy/nugridse.py | se.get_elemental_abunds | def get_elemental_abunds(self,cycle,index=None):
"""
returns the elemental abundances for one cycle, either
for the whole star or a specific zone depending upon
the value of 'index'.
Parameters
----------
cycle : string or integer
Model to get the abundances for.
index : integer or list, optional
zone number for which to get elemental abundances. If
None the entire abundance profile is returned. If a 1x2
list, the abundances are returned between indices of
index[0] and index[1].
The default is None.
"""
isoabunds=self.se.get(cycle,'iso_massf')
A=array(self.se.A)
Z=array(self.se.Z)
names=self.se.isos
Zuq=list(set(Z)) # list of unique Zs
Zuq.sort()
if index==None:
index=[0,len(isoabunds)]
if type(index)==list:
elemabunds=[]
for zone in range(index[0],index[1]):
percent=int((zone-index[0])*100./(index[1]-index[0]))
sys.stdout.flush()
sys.stdout.write("\rgetting elemental abundances " + "...%d%%" % percent)
elemabunds.append([sum(isoabunds[zone][where(Z==iZ)]) for iZ in Zuq])
else:
elemabunds=[sum(isoabunds[index][where(Z==iZ)]) for iZ in Zuq]
return elemabunds | python | def get_elemental_abunds(self,cycle,index=None):
"""
returns the elemental abundances for one cycle, either
for the whole star or a specific zone depending upon
the value of 'index'.
Parameters
----------
cycle : string or integer
Model to get the abundances for.
index : integer or list, optional
zone number for which to get elemental abundances. If
None the entire abundance profile is returned. If a 1x2
list, the abundances are returned between indices of
index[0] and index[1].
The default is None.
"""
isoabunds=self.se.get(cycle,'iso_massf')
A=array(self.se.A)
Z=array(self.se.Z)
names=self.se.isos
Zuq=list(set(Z)) # list of unique Zs
Zuq.sort()
if index==None:
index=[0,len(isoabunds)]
if type(index)==list:
elemabunds=[]
for zone in range(index[0],index[1]):
percent=int((zone-index[0])*100./(index[1]-index[0]))
sys.stdout.flush()
sys.stdout.write("\rgetting elemental abundances " + "...%d%%" % percent)
elemabunds.append([sum(isoabunds[zone][where(Z==iZ)]) for iZ in Zuq])
else:
elemabunds=[sum(isoabunds[index][where(Z==iZ)]) for iZ in Zuq]
return elemabunds | [
"def",
"get_elemental_abunds",
"(",
"self",
",",
"cycle",
",",
"index",
"=",
"None",
")",
":",
"isoabunds",
"=",
"self",
".",
"se",
".",
"get",
"(",
"cycle",
",",
"'iso_massf'",
")",
"A",
"=",
"array",
"(",
"self",
".",
"se",
".",
"A",
")",
"Z",
... | returns the elemental abundances for one cycle, either
for the whole star or a specific zone depending upon
the value of 'index'.
Parameters
----------
cycle : string or integer
Model to get the abundances for.
index : integer or list, optional
zone number for which to get elemental abundances. If
None the entire abundance profile is returned. If a 1x2
list, the abundances are returned between indices of
index[0] and index[1].
The default is None. | [
"returns",
"the",
"elemental",
"abundances",
"for",
"one",
"cycle",
"either",
"for",
"the",
"whole",
"star",
"or",
"a",
"specific",
"zone",
"depending",
"upon",
"the",
"value",
"of",
"index",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L423-L462 | train | 41,620 |
NuGrid/NuGridPy | nugridpy/nugridse.py | se.plot_prof_1 | def plot_prof_1(self, mod, species, xlim1, xlim2, ylim1, ylim2,
symbol=None):
"""
plot one species for cycle between xlim1 and xlim2
Parameters
----------
mod : string or integer
Model to plot, same as cycle number.
species : list
Which species to plot.
xlim1, xlim2 : float
Mass coordinate range.
ylim1, ylim2 : float
Mass fraction coordinate range.
symbol : string, optional
Which symbol you want to use. If None symbol is set to '-'.
The default is None.
"""
DataPlot.plot_prof_1(self,species,mod,xlim1,xlim2,ylim1,ylim2,symbol)
"""
tot_mass=self.se.get(mod,'total_mass')
age=self.se.get(mod,'age')
mass=self.se.get(mod,'mass')
Xspecies=self.se.get(mod,'iso_massf',species)
pyl.plot(mass,np.log10(Xspecies),'-',label=species)
pyl.xlim(xlim1,xlim2)
pyl.ylim(ylim1,ylim2)
pyl.legend()
pl.xlabel('$Mass$ $coordinate$', fontsize=20)
pl.ylabel('$X_{i}$', fontsize=20)
pl.title('Mass='+str(tot_mass)+', Time='+str(age)+' years, cycle='+str(mod))
""" | python | def plot_prof_1(self, mod, species, xlim1, xlim2, ylim1, ylim2,
symbol=None):
"""
plot one species for cycle between xlim1 and xlim2
Parameters
----------
mod : string or integer
Model to plot, same as cycle number.
species : list
Which species to plot.
xlim1, xlim2 : float
Mass coordinate range.
ylim1, ylim2 : float
Mass fraction coordinate range.
symbol : string, optional
Which symbol you want to use. If None symbol is set to '-'.
The default is None.
"""
DataPlot.plot_prof_1(self,species,mod,xlim1,xlim2,ylim1,ylim2,symbol)
"""
tot_mass=self.se.get(mod,'total_mass')
age=self.se.get(mod,'age')
mass=self.se.get(mod,'mass')
Xspecies=self.se.get(mod,'iso_massf',species)
pyl.plot(mass,np.log10(Xspecies),'-',label=species)
pyl.xlim(xlim1,xlim2)
pyl.ylim(ylim1,ylim2)
pyl.legend()
pl.xlabel('$Mass$ $coordinate$', fontsize=20)
pl.ylabel('$X_{i}$', fontsize=20)
pl.title('Mass='+str(tot_mass)+', Time='+str(age)+' years, cycle='+str(mod))
""" | [
"def",
"plot_prof_1",
"(",
"self",
",",
"mod",
",",
"species",
",",
"xlim1",
",",
"xlim2",
",",
"ylim1",
",",
"ylim2",
",",
"symbol",
"=",
"None",
")",
":",
"DataPlot",
".",
"plot_prof_1",
"(",
"self",
",",
"species",
",",
"mod",
",",
"xlim1",
",",
... | plot one species for cycle between xlim1 and xlim2
Parameters
----------
mod : string or integer
Model to plot, same as cycle number.
species : list
Which species to plot.
xlim1, xlim2 : float
Mass coordinate range.
ylim1, ylim2 : float
Mass fraction coordinate range.
symbol : string, optional
Which symbol you want to use. If None symbol is set to '-'.
The default is None. | [
"plot",
"one",
"species",
"for",
"cycle",
"between",
"xlim1",
"and",
"xlim2"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L465-L499 | train | 41,621 |
NuGrid/NuGridPy | nugridpy/nugridse.py | se.plot_prof_2 | def plot_prof_2(self, mod, species, xlim1, xlim2):
"""
Plot one species for cycle between xlim1 and xlim2
Parameters
----------
mod : string or integer
Model to plot, same as cycle number.
species : list
Which species to plot.
xlim1, xlim2 : float
Mass coordinate range.
"""
mass=self.se.get(mod,'mass')
Xspecies=self.se.get(mod,'yps',species)
pyl.plot(mass,Xspecies,'-',label=str(mod)+', '+species)
pyl.xlim(xlim1,xlim2)
pyl.legend() | python | def plot_prof_2(self, mod, species, xlim1, xlim2):
"""
Plot one species for cycle between xlim1 and xlim2
Parameters
----------
mod : string or integer
Model to plot, same as cycle number.
species : list
Which species to plot.
xlim1, xlim2 : float
Mass coordinate range.
"""
mass=self.se.get(mod,'mass')
Xspecies=self.se.get(mod,'yps',species)
pyl.plot(mass,Xspecies,'-',label=str(mod)+', '+species)
pyl.xlim(xlim1,xlim2)
pyl.legend() | [
"def",
"plot_prof_2",
"(",
"self",
",",
"mod",
",",
"species",
",",
"xlim1",
",",
"xlim2",
")",
":",
"mass",
"=",
"self",
".",
"se",
".",
"get",
"(",
"mod",
",",
"'mass'",
")",
"Xspecies",
"=",
"self",
".",
"se",
".",
"get",
"(",
"mod",
",",
"'... | Plot one species for cycle between xlim1 and xlim2
Parameters
----------
mod : string or integer
Model to plot, same as cycle number.
species : list
Which species to plot.
xlim1, xlim2 : float
Mass coordinate range. | [
"Plot",
"one",
"species",
"for",
"cycle",
"between",
"xlim1",
"and",
"xlim2"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L501-L521 | train | 41,622 |
NuGrid/NuGridPy | nugridpy/nugridse.py | se.plot_prof_sparse | def plot_prof_sparse(self, mod, species, xlim1, xlim2, ylim1, ylim2,
sparse, symbol):
"""
plot one species for cycle between xlim1 and xlim2.
Parameters
----------
species : list
which species to plot.
mod : string or integer
Model (cycle) to plot.
xlim1, xlim2 : float
Mass coordinate range.
ylim1, ylim2 : float
Mass fraction coordinate range.
sparse : integer
Sparsity factor for points.
symbol : string
which symbol you want to use?
"""
mass=self.se.get(mod,'mass')
Xspecies=self.se.get(mod,'yps',species)
pyl.plot(mass[0:len(mass):sparse],np.log10(Xspecies[0:len(Xspecies):sparse]),symbol)
pyl.xlim(xlim1,xlim2)
pyl.ylim(ylim1,ylim2)
pyl.legend() | python | def plot_prof_sparse(self, mod, species, xlim1, xlim2, ylim1, ylim2,
sparse, symbol):
"""
plot one species for cycle between xlim1 and xlim2.
Parameters
----------
species : list
which species to plot.
mod : string or integer
Model (cycle) to plot.
xlim1, xlim2 : float
Mass coordinate range.
ylim1, ylim2 : float
Mass fraction coordinate range.
sparse : integer
Sparsity factor for points.
symbol : string
which symbol you want to use?
"""
mass=self.se.get(mod,'mass')
Xspecies=self.se.get(mod,'yps',species)
pyl.plot(mass[0:len(mass):sparse],np.log10(Xspecies[0:len(Xspecies):sparse]),symbol)
pyl.xlim(xlim1,xlim2)
pyl.ylim(ylim1,ylim2)
pyl.legend() | [
"def",
"plot_prof_sparse",
"(",
"self",
",",
"mod",
",",
"species",
",",
"xlim1",
",",
"xlim2",
",",
"ylim1",
",",
"ylim2",
",",
"sparse",
",",
"symbol",
")",
":",
"mass",
"=",
"self",
".",
"se",
".",
"get",
"(",
"mod",
",",
"'mass'",
")",
"Xspecie... | plot one species for cycle between xlim1 and xlim2.
Parameters
----------
species : list
which species to plot.
mod : string or integer
Model (cycle) to plot.
xlim1, xlim2 : float
Mass coordinate range.
ylim1, ylim2 : float
Mass fraction coordinate range.
sparse : integer
Sparsity factor for points.
symbol : string
which symbol you want to use? | [
"plot",
"one",
"species",
"for",
"cycle",
"between",
"xlim1",
"and",
"xlim2",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L877-L904 | train | 41,623 |
NuGrid/NuGridPy | nugridpy/nugridse.py | se.trajectory | def trajectory(self, ini, end, delta, mass_coo, age_in_sec=False,
online=False):
"""
create a trajectory out of a stellar model
Parameters
----------
ini : integer
Initial model, inital cycle number.
end : integer
Final model, final cycle number.
delta : integer
Sparsity factor of the frames.
mass_coo : float
Mass coordinate for the traj.
age_in_sec : boolean, optional
Set to True if age in se file is in seconds (like in MESA).
The default is False.
Returns
--------
float
radius_at_mass_coo, density_at_mass_coo,
temperature_at_mass_coo, age_all
Notes
-----
plus writes a file with the trajectory information to be used
with ppn.
Warning: remove the old trajectory, if you have any for the same
mass coordinate. You are appending data, not overwriting.
Update: this method works for output types with indexes going
from the outside in (MESA) or the other way around. Also the
requested quantities are linearly interpolated in the mass
shell.
online: boolean, optional
are you working online in the ipython notebook? If so,
you will be given an HTML link to download the file.
"""
filename='traj_'+str(mass_coo)+'.dat'
f = open(filename,'a')
radius_at_mass_coo=[]
density_at_mass_coo=[]
temperature_at_mass_coo=[]
masses=self.se.get(list(range(ini,end+1,delta)),'mass')
temps=self.se.get(list(range(ini,end+1,delta)),'temperature')
rhos=self.se.get(list(range(ini,end+1,delta)),'rho')
radii=self.se.get(list(range(ini,end+1,delta)),'radius')
ages=self.se.get(list(range(ini,end+1,delta)),'age')
cycs=list(range(ini,end+1,delta))
age_all=[]
for i in range(len(ages)):
age=ages[i]
if age_in_sec:
age /= constants.one_year
mass=masses[i]
temperature=temps[i]
rho=rhos[i]
radius=radii[i]
my_things=[temperature,rho,radius]
if mass[0]>mass[len(mass)-1]:
zone_above=where(mass>mass_coo)[0][-1]
zone_below=zone_above+1
else:
zone_above=where(mass>mass_coo)[0][0]
zone_below=zone_above-1
if mass[zone_below]>mass[zone_above]:
sys.exit("ERROR: finding of zone index confused")
all_things_interplt=[]
for thing in my_things:
thing_interplt=thing[zone_below]+(mass_coo-mass[zone_below])* \
(thing[zone_above]-thing[zone_below])/(mass[zone_above]-mass[zone_below])
all_things_interplt.append(thing_interplt)
this_temperature,this_rho,this_radius=all_things_interplt
string = str(cycs[i])+' '+str(age)+' '+str(this_temperature)+' '+str(this_rho)
f.write(string+"\n")
radius_at_mass_coo.append(this_radius)
density_at_mass_coo.append(this_rho)
temperature_at_mass_coo.append(this_temperature)
age_all.append(age)
f.close()
if online:
return FileLink(filename)
return radius_at_mass_coo, density_at_mass_coo, temperature_at_mass_coo, age_all | python | def trajectory(self, ini, end, delta, mass_coo, age_in_sec=False,
online=False):
"""
create a trajectory out of a stellar model
Parameters
----------
ini : integer
Initial model, inital cycle number.
end : integer
Final model, final cycle number.
delta : integer
Sparsity factor of the frames.
mass_coo : float
Mass coordinate for the traj.
age_in_sec : boolean, optional
Set to True if age in se file is in seconds (like in MESA).
The default is False.
Returns
--------
float
radius_at_mass_coo, density_at_mass_coo,
temperature_at_mass_coo, age_all
Notes
-----
plus writes a file with the trajectory information to be used
with ppn.
Warning: remove the old trajectory, if you have any for the same
mass coordinate. You are appending data, not overwriting.
Update: this method works for output types with indexes going
from the outside in (MESA) or the other way around. Also the
requested quantities are linearly interpolated in the mass
shell.
online: boolean, optional
are you working online in the ipython notebook? If so,
you will be given an HTML link to download the file.
"""
filename='traj_'+str(mass_coo)+'.dat'
f = open(filename,'a')
radius_at_mass_coo=[]
density_at_mass_coo=[]
temperature_at_mass_coo=[]
masses=self.se.get(list(range(ini,end+1,delta)),'mass')
temps=self.se.get(list(range(ini,end+1,delta)),'temperature')
rhos=self.se.get(list(range(ini,end+1,delta)),'rho')
radii=self.se.get(list(range(ini,end+1,delta)),'radius')
ages=self.se.get(list(range(ini,end+1,delta)),'age')
cycs=list(range(ini,end+1,delta))
age_all=[]
for i in range(len(ages)):
age=ages[i]
if age_in_sec:
age /= constants.one_year
mass=masses[i]
temperature=temps[i]
rho=rhos[i]
radius=radii[i]
my_things=[temperature,rho,radius]
if mass[0]>mass[len(mass)-1]:
zone_above=where(mass>mass_coo)[0][-1]
zone_below=zone_above+1
else:
zone_above=where(mass>mass_coo)[0][0]
zone_below=zone_above-1
if mass[zone_below]>mass[zone_above]:
sys.exit("ERROR: finding of zone index confused")
all_things_interplt=[]
for thing in my_things:
thing_interplt=thing[zone_below]+(mass_coo-mass[zone_below])* \
(thing[zone_above]-thing[zone_below])/(mass[zone_above]-mass[zone_below])
all_things_interplt.append(thing_interplt)
this_temperature,this_rho,this_radius=all_things_interplt
string = str(cycs[i])+' '+str(age)+' '+str(this_temperature)+' '+str(this_rho)
f.write(string+"\n")
radius_at_mass_coo.append(this_radius)
density_at_mass_coo.append(this_rho)
temperature_at_mass_coo.append(this_temperature)
age_all.append(age)
f.close()
if online:
return FileLink(filename)
return radius_at_mass_coo, density_at_mass_coo, temperature_at_mass_coo, age_all | [
"def",
"trajectory",
"(",
"self",
",",
"ini",
",",
"end",
",",
"delta",
",",
"mass_coo",
",",
"age_in_sec",
"=",
"False",
",",
"online",
"=",
"False",
")",
":",
"filename",
"=",
"'traj_'",
"+",
"str",
"(",
"mass_coo",
")",
"+",
"'.dat'",
"f",
"=",
... | create a trajectory out of a stellar model
Parameters
----------
ini : integer
Initial model, inital cycle number.
end : integer
Final model, final cycle number.
delta : integer
Sparsity factor of the frames.
mass_coo : float
Mass coordinate for the traj.
age_in_sec : boolean, optional
Set to True if age in se file is in seconds (like in MESA).
The default is False.
Returns
--------
float
radius_at_mass_coo, density_at_mass_coo,
temperature_at_mass_coo, age_all
Notes
-----
plus writes a file with the trajectory information to be used
with ppn.
Warning: remove the old trajectory, if you have any for the same
mass coordinate. You are appending data, not overwriting.
Update: this method works for output types with indexes going
from the outside in (MESA) or the other way around. Also the
requested quantities are linearly interpolated in the mass
shell.
online: boolean, optional
are you working online in the ipython notebook? If so,
you will be given an HTML link to download the file. | [
"create",
"a",
"trajectory",
"out",
"of",
"a",
"stellar",
"model"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L906-L997 | train | 41,624 |
NuGrid/NuGridPy | nugridpy/nugridse.py | se.abup_se_plot | def abup_se_plot(mod,species):
"""
plot species from one ABUPP file and the se file.
You must use this function in the directory where the ABP files
are and an ABUP file for model mod must exist.
Parameters
----------
mod : integer
Model to plot, you need to have an ABUPP file for that
model.
species : string
The species to plot.
Notes
-----
The species is set to 'C-12'.
"""
# Marco, you have already implemented finding headers and columns in
# ABUP files. You may want to transplant that into here?
species='C-12'
filename = 'ABUPP%07d0000.DAT' % mod
print(filename)
mass,c12=np.loadtxt(filename,skiprows=4,usecols=[1,18],unpack=True)
c12_se=self.se.get(mod,'iso_massf','C-12')
mass_se=self.se.get(mod,'mass')
pyl.plot(mass,c12)
pyl.plot(mass_se,c12_se,'o',label='cycle '+str(mod))
pyl.legend() | python | def abup_se_plot(mod,species):
"""
plot species from one ABUPP file and the se file.
You must use this function in the directory where the ABP files
are and an ABUP file for model mod must exist.
Parameters
----------
mod : integer
Model to plot, you need to have an ABUPP file for that
model.
species : string
The species to plot.
Notes
-----
The species is set to 'C-12'.
"""
# Marco, you have already implemented finding headers and columns in
# ABUP files. You may want to transplant that into here?
species='C-12'
filename = 'ABUPP%07d0000.DAT' % mod
print(filename)
mass,c12=np.loadtxt(filename,skiprows=4,usecols=[1,18],unpack=True)
c12_se=self.se.get(mod,'iso_massf','C-12')
mass_se=self.se.get(mod,'mass')
pyl.plot(mass,c12)
pyl.plot(mass_se,c12_se,'o',label='cycle '+str(mod))
pyl.legend() | [
"def",
"abup_se_plot",
"(",
"mod",
",",
"species",
")",
":",
"# Marco, you have already implemented finding headers and columns in",
"# ABUP files. You may want to transplant that into here?",
"species",
"=",
"'C-12'",
"filename",
"=",
"'ABUPP%07d0000.DAT'",
"%",
"mod",
"print",
... | plot species from one ABUPP file and the se file.
You must use this function in the directory where the ABP files
are and an ABUP file for model mod must exist.
Parameters
----------
mod : integer
Model to plot, you need to have an ABUPP file for that
model.
species : string
The species to plot.
Notes
-----
The species is set to 'C-12'. | [
"plot",
"species",
"from",
"one",
"ABUPP",
"file",
"and",
"the",
"se",
"file",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L2027-L2061 | train | 41,625 |
NuGrid/NuGridPy | nugridpy/nugridse.py | se.decay | def decay(self, mass_frac):
"""
this module simply calculate abundances of isotopes after decay.
It requires that before it is used a call is made to
_read_iso_abund_marco and _stable_species.
Parameters
----------
mass_frac : list
alist of mass_frac dicts.
See Also
--------
_read_iso_abund_marco(), nuutils.Utils._stable_species()
"""
import nuutils as u
global decayed_multi_d
decayed_multi_d=[]
#print len(mass_frac)
#print len(decay_raw)
for iii in range(len(mass_frac)):
jj=-1
decayed=[]
for i in range(len(u.decay_raw)):
if u.jdum[i] > 0.5:
jj=jj+1
dummy=0.
for j in range(len(u.decay_raw[i])):
try:
dum_str = u.decay_raw[i][j]
dummy = dummy + float(self.mass_frac[iii][u.cl[dum_str.lower().capitalize()]])
#print cl[dum_str.lower().capitalize()]
#print dum_str, mass_frac[iii][cl[dum_str.capitalize()]]
except KeyError:
None
#print 'I am not in the network:',decay_raw[i][j]
except IndexError:
None
#print 'I am not read',cl[decay_raw[i][j].lower().capitalize()],decay_raw[i][j]
decayed.append(dummy)
decayed_multi_d.append(decayed) | python | def decay(self, mass_frac):
"""
this module simply calculate abundances of isotopes after decay.
It requires that before it is used a call is made to
_read_iso_abund_marco and _stable_species.
Parameters
----------
mass_frac : list
alist of mass_frac dicts.
See Also
--------
_read_iso_abund_marco(), nuutils.Utils._stable_species()
"""
import nuutils as u
global decayed_multi_d
decayed_multi_d=[]
#print len(mass_frac)
#print len(decay_raw)
for iii in range(len(mass_frac)):
jj=-1
decayed=[]
for i in range(len(u.decay_raw)):
if u.jdum[i] > 0.5:
jj=jj+1
dummy=0.
for j in range(len(u.decay_raw[i])):
try:
dum_str = u.decay_raw[i][j]
dummy = dummy + float(self.mass_frac[iii][u.cl[dum_str.lower().capitalize()]])
#print cl[dum_str.lower().capitalize()]
#print dum_str, mass_frac[iii][cl[dum_str.capitalize()]]
except KeyError:
None
#print 'I am not in the network:',decay_raw[i][j]
except IndexError:
None
#print 'I am not read',cl[decay_raw[i][j].lower().capitalize()],decay_raw[i][j]
decayed.append(dummy)
decayed_multi_d.append(decayed) | [
"def",
"decay",
"(",
"self",
",",
"mass_frac",
")",
":",
"import",
"nuutils",
"as",
"u",
"global",
"decayed_multi_d",
"decayed_multi_d",
"=",
"[",
"]",
"#print len(mass_frac)",
"#print len(decay_raw)",
"for",
"iii",
"in",
"range",
"(",
"len",
"(",
"mass_frac",
... | this module simply calculate abundances of isotopes after decay.
It requires that before it is used a call is made to
_read_iso_abund_marco and _stable_species.
Parameters
----------
mass_frac : list
alist of mass_frac dicts.
See Also
--------
_read_iso_abund_marco(), nuutils.Utils._stable_species() | [
"this",
"module",
"simply",
"calculate",
"abundances",
"of",
"isotopes",
"after",
"decay",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L2132-L2177 | train | 41,626 |
NuGrid/NuGridPy | nugridpy/nugridse.py | se.windyields | def windyields(self, ini, end, delta, **keyw):
"""
This function returns the wind yields and ejected masses.
X_i, E_i = data.windyields(ini, end, delta)
Parameters
----------
ini : integer
The starting cycle.
end : integer
The finishing cycle.
delta : integer
The cycle interval.
keyw : dict
A dict of key word arguments.
Returns
-------
list
The function returns a list of the wind yields(X_i) and
a list of the ejected masses(E_i) in the mass units that
were used (usually solar masses).
Notes
-----
The following keywords cand also be used:
+------------------+---------------+
| Keyword Argument | Default Value |
+==================+===============+
| abund | "iso_massf" |
+------------------+---------------+
| tmass | "mass" |
+------------------+---------------+
| cycle | "cycle" |
+------------------+---------------+
The keyword arguments are used when the variables within the
input file differ in name from their default values typically
found in an MPPNP output file. If the data table differs in
name, use these keywords. For example, if the table for the
abundances is called "abundances" instead of "iso_massf", then
use abund = "abundances" as a keyword argument.
"""
if ("tmass" in keyw) == False:
keyw["tmass"] = "mass"
if ("abund" in keyw) == False:
keyw["abund"] = "iso_massf"
if ("cycle" in keyw) == False:
keyw["cycle"] = "cycle"
print("Windyields() initialised. Reading files...")
ypsinit = []
niso = 0
X_i = []
E_i = []
totalmass = []
ypssurf = []
cycles = []
first = True
# The following statements copy global functions into local memory,
# which is called faster, speeding up the code slightly
wc = self._windcalc
cycleret = self.se.cycles
retrieve = self.se.get
capp = cycles.extend
tapp = totalmass.extend
yapp = ypssurf.extend
# Retrieve the data from the files
for i in range(ini,end+1,delta):
step = int(i)
capp([int(cycleret[i-ini])])
tapp([retrieve(step,keyw["tmass"])])
yapp([retrieve(step,keyw["abund"])])
print("Reading complete. Calculating yields and ejected masses...")
nsteps = len(cycles)-1
niso = len(ypssurf[0])
X_i = np.zeros([niso], float)
E_i = np.zeros([niso], float)
# Call the windyields calculator
X_i, E_i = wc(first, totalmass, nsteps, niso, ypssurf, \
ypsinit, X_i, E_i, cycles)
return X_i, E_i | python | def windyields(self, ini, end, delta, **keyw):
"""
This function returns the wind yields and ejected masses.
X_i, E_i = data.windyields(ini, end, delta)
Parameters
----------
ini : integer
The starting cycle.
end : integer
The finishing cycle.
delta : integer
The cycle interval.
keyw : dict
A dict of key word arguments.
Returns
-------
list
The function returns a list of the wind yields(X_i) and
a list of the ejected masses(E_i) in the mass units that
were used (usually solar masses).
Notes
-----
The following keywords cand also be used:
+------------------+---------------+
| Keyword Argument | Default Value |
+==================+===============+
| abund | "iso_massf" |
+------------------+---------------+
| tmass | "mass" |
+------------------+---------------+
| cycle | "cycle" |
+------------------+---------------+
The keyword arguments are used when the variables within the
input file differ in name from their default values typically
found in an MPPNP output file. If the data table differs in
name, use these keywords. For example, if the table for the
abundances is called "abundances" instead of "iso_massf", then
use abund = "abundances" as a keyword argument.
"""
if ("tmass" in keyw) == False:
keyw["tmass"] = "mass"
if ("abund" in keyw) == False:
keyw["abund"] = "iso_massf"
if ("cycle" in keyw) == False:
keyw["cycle"] = "cycle"
print("Windyields() initialised. Reading files...")
ypsinit = []
niso = 0
X_i = []
E_i = []
totalmass = []
ypssurf = []
cycles = []
first = True
# The following statements copy global functions into local memory,
# which is called faster, speeding up the code slightly
wc = self._windcalc
cycleret = self.se.cycles
retrieve = self.se.get
capp = cycles.extend
tapp = totalmass.extend
yapp = ypssurf.extend
# Retrieve the data from the files
for i in range(ini,end+1,delta):
step = int(i)
capp([int(cycleret[i-ini])])
tapp([retrieve(step,keyw["tmass"])])
yapp([retrieve(step,keyw["abund"])])
print("Reading complete. Calculating yields and ejected masses...")
nsteps = len(cycles)-1
niso = len(ypssurf[0])
X_i = np.zeros([niso], float)
E_i = np.zeros([niso], float)
# Call the windyields calculator
X_i, E_i = wc(first, totalmass, nsteps, niso, ypssurf, \
ypsinit, X_i, E_i, cycles)
return X_i, E_i | [
"def",
"windyields",
"(",
"self",
",",
"ini",
",",
"end",
",",
"delta",
",",
"*",
"*",
"keyw",
")",
":",
"if",
"(",
"\"tmass\"",
"in",
"keyw",
")",
"==",
"False",
":",
"keyw",
"[",
"\"tmass\"",
"]",
"=",
"\"mass\"",
"if",
"(",
"\"abund\"",
"in",
... | This function returns the wind yields and ejected masses.
X_i, E_i = data.windyields(ini, end, delta)
Parameters
----------
ini : integer
The starting cycle.
end : integer
The finishing cycle.
delta : integer
The cycle interval.
keyw : dict
A dict of key word arguments.
Returns
-------
list
The function returns a list of the wind yields(X_i) and
a list of the ejected masses(E_i) in the mass units that
were used (usually solar masses).
Notes
-----
The following keywords cand also be used:
+------------------+---------------+
| Keyword Argument | Default Value |
+==================+===============+
| abund | "iso_massf" |
+------------------+---------------+
| tmass | "mass" |
+------------------+---------------+
| cycle | "cycle" |
+------------------+---------------+
The keyword arguments are used when the variables within the
input file differ in name from their default values typically
found in an MPPNP output file. If the data table differs in
name, use these keywords. For example, if the table for the
abundances is called "abundances" instead of "iso_massf", then
use abund = "abundances" as a keyword argument. | [
"This",
"function",
"returns",
"the",
"wind",
"yields",
"and",
"ejected",
"masses",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L3640-L3730 | train | 41,627 |
NuGrid/NuGridPy | nugridpy/nugridse.py | se.average_iso_abund_marco | def average_iso_abund_marco(self,mass_range,cycle,stable,i_decay):
"""
Interface to average over mass_range.
Parameters
----------
mass_range : list
A 1x2 array required to plot data in a certain mass range.
Needed for _read_iso_abund_marco.
cycle : integer
which cycle from the h5 file?. Needed for _read_iso_abund_marco
stable : boolean
Do you want to plot only stable or not.
i_decay : integer
If i_decay is 1, then plot not decayed. If i_decay is 2,
then plot decayed. Make sense only if stable is true.
See Also
--------
_read_iso_abund_marco()
"""
import nuutils as u
if not stable and i_decay == 2:
print('ERROR: choose i_decay = 1')
return
#data=mp.se(directory,name_h5_file)
self._read_iso_abund_marco(mass_range,cycle)
#print spe
if i_decay == 2:
u.stable_specie()
self.decay(self.mass_frac)
# here I am calculating average mass fraction for all isotopes in given mass range, and then
# if needed calculating average over decayed.
# warning: mass_range is bigger than used_masses range, by definition. Should I use it?
print('average over used_masses range, not over original mass_range')
print(used_masses[0],used_masses[len(used_masses)-1],'instead of',mass_range[0],mass_range[1])
global average_mass_frac
average_mass_frac = []
if len(used_masses) >= 2:
dm_tot = abs(used_masses[len(used_masses)-1]-used_masses[0])
for j in range(len(u.spe)-1):
temp = 0.
for i in range(len(used_masses)-1):
dm_i = abs(used_masses[i+1]-used_masses[i])
temp = float(self.mass_frac[i][j]*dm_i/dm_tot) + temp
average_mass_frac.append(temp)
#print average_mass_frac
elif len(used_masses) == 1:
print('case with 1 mass zone only, not implemented yet')
somma = 0.
somma = sum(average_mass_frac)
print('departure from 1 of sum of average_mass_frac=',abs(1. - somma))
# not let's do it over decayed also, if i_decay = 2
if i_decay == 2:
global average_mass_frac_decay
average_mass_frac_decay = []
dm_tot = abs(used_masses[len(used_masses)-1]-used_masses[0])
#
#print len(decayed_multi_d[0]),decayed_multi_d[0]
for j in range(len(u.back_ind)):
temp = 0.
for i in range(len(used_masses)-1):
dm_i = abs(used_masses[i+1]-used_masses[i])
temp = float(decayed_multi_d[i][j]*dm_i/dm_tot) + temp
average_mass_frac_decay.append(temp)
somma = 0.
somma = sum(average_mass_frac_decay)
print('departure from 1 of sum of average_mass_frac_decay=',abs(1. - somma)) | python | def average_iso_abund_marco(self,mass_range,cycle,stable,i_decay):
"""
Interface to average over mass_range.
Parameters
----------
mass_range : list
A 1x2 array required to plot data in a certain mass range.
Needed for _read_iso_abund_marco.
cycle : integer
which cycle from the h5 file?. Needed for _read_iso_abund_marco
stable : boolean
Do you want to plot only stable or not.
i_decay : integer
If i_decay is 1, then plot not decayed. If i_decay is 2,
then plot decayed. Make sense only if stable is true.
See Also
--------
_read_iso_abund_marco()
"""
import nuutils as u
if not stable and i_decay == 2:
print('ERROR: choose i_decay = 1')
return
#data=mp.se(directory,name_h5_file)
self._read_iso_abund_marco(mass_range,cycle)
#print spe
if i_decay == 2:
u.stable_specie()
self.decay(self.mass_frac)
# here I am calculating average mass fraction for all isotopes in given mass range, and then
# if needed calculating average over decayed.
# warning: mass_range is bigger than used_masses range, by definition. Should I use it?
print('average over used_masses range, not over original mass_range')
print(used_masses[0],used_masses[len(used_masses)-1],'instead of',mass_range[0],mass_range[1])
global average_mass_frac
average_mass_frac = []
if len(used_masses) >= 2:
dm_tot = abs(used_masses[len(used_masses)-1]-used_masses[0])
for j in range(len(u.spe)-1):
temp = 0.
for i in range(len(used_masses)-1):
dm_i = abs(used_masses[i+1]-used_masses[i])
temp = float(self.mass_frac[i][j]*dm_i/dm_tot) + temp
average_mass_frac.append(temp)
#print average_mass_frac
elif len(used_masses) == 1:
print('case with 1 mass zone only, not implemented yet')
somma = 0.
somma = sum(average_mass_frac)
print('departure from 1 of sum of average_mass_frac=',abs(1. - somma))
# not let's do it over decayed also, if i_decay = 2
if i_decay == 2:
global average_mass_frac_decay
average_mass_frac_decay = []
dm_tot = abs(used_masses[len(used_masses)-1]-used_masses[0])
#
#print len(decayed_multi_d[0]),decayed_multi_d[0]
for j in range(len(u.back_ind)):
temp = 0.
for i in range(len(used_masses)-1):
dm_i = abs(used_masses[i+1]-used_masses[i])
temp = float(decayed_multi_d[i][j]*dm_i/dm_tot) + temp
average_mass_frac_decay.append(temp)
somma = 0.
somma = sum(average_mass_frac_decay)
print('departure from 1 of sum of average_mass_frac_decay=',abs(1. - somma)) | [
"def",
"average_iso_abund_marco",
"(",
"self",
",",
"mass_range",
",",
"cycle",
",",
"stable",
",",
"i_decay",
")",
":",
"import",
"nuutils",
"as",
"u",
"if",
"not",
"stable",
"and",
"i_decay",
"==",
"2",
":",
"print",
"(",
"'ERROR: choose i_decay = 1'",
")"... | Interface to average over mass_range.
Parameters
----------
mass_range : list
A 1x2 array required to plot data in a certain mass range.
Needed for _read_iso_abund_marco.
cycle : integer
which cycle from the h5 file?. Needed for _read_iso_abund_marco
stable : boolean
Do you want to plot only stable or not.
i_decay : integer
If i_decay is 1, then plot not decayed. If i_decay is 2,
then plot decayed. Make sense only if stable is true.
See Also
--------
_read_iso_abund_marco() | [
"Interface",
"to",
"average",
"over",
"mass_range",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L3767-L3847 | train | 41,628 |
NuGrid/NuGridPy | nugridpy/nugridse.py | se._get_elem_names | def _get_elem_names(self):
""" returns for one cycle an element name dictionary."""
import nuutils as u
# provide library for Z versus element names, and Z for elements
#element_name = self.se.elements
element_name = self.elements_names
u.give_zip_element_z_and_names(element_name)
self.z_of_element_name = u.index_z_for_elements | python | def _get_elem_names(self):
""" returns for one cycle an element name dictionary."""
import nuutils as u
# provide library for Z versus element names, and Z for elements
#element_name = self.se.elements
element_name = self.elements_names
u.give_zip_element_z_and_names(element_name)
self.z_of_element_name = u.index_z_for_elements | [
"def",
"_get_elem_names",
"(",
"self",
")",
":",
"import",
"nuutils",
"as",
"u",
"# provide library for Z versus element names, and Z for elements",
"#element_name = self.se.elements",
"element_name",
"=",
"self",
".",
"elements_names",
"u",
".",
"give_zip_element_z_and_names",... | returns for one cycle an element name dictionary. | [
"returns",
"for",
"one",
"cycle",
"an",
"element",
"name",
"dictionary",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L3851-L3860 | train | 41,629 |
NuGrid/NuGridPy | nugridpy/nugridse.py | se.get_abundance_iso_decay | def get_abundance_iso_decay(self,cycle):
"""
returns the decayed stable isotopes.
Parameters
----------
cycle : integer
The cycle.
"""
import nuutils as u
masses_for_this_cycle = self.se.get(cycle,'mass')
self._read_iso_abund_marco([min(masses_for_this_cycle),max(masses_for_this_cycle)],cycle)
u.stable_specie()
self.decay(self.mass_frac)
self.index_for_all_species = u.cl
self.index_for_stable_species = u.back_ind
self.decayed_stable_isotopes_per_cycle = decayed_multi_d
# from here read solar abundances
solar_factor = 2.
u.solar('iniab1.0E-02.ppn_GN93',solar_factor)
self.stable_isotope_identifier=u.jjdum
self.stable_isotope_list=u.stable
self.isotopic_production_factors=[]
for i in range(len(masses_for_this_cycle)):
pf_dum=[]
jj=0
for j in range(len(self.stable_isotope_identifier)):
if self.stable_isotope_identifier[j] == 1:
pf_dum.append(float(old_div(self.mass_frac[i][self.index_for_all_species[self.stable_isotope_list
[jj].capitalize()]],u.solar_abundance[self.stable_isotope_list[jj].lower()])))
jj=jj+1
#elif self.stable_isotope_identifier[j] == 0:
# pf_dum.append(float(0.))
self.isotopic_production_factors.append(pf_dum)
self.isotopic_production_factors_decayed=[]
for i in range(len(masses_for_this_cycle)):
pf_dum_d=[]
jj=0
for j in range(len(self.stable_isotope_identifier)):
if self.stable_isotope_identifier[j] == 1:
pf_dum_d.append(float(old_div(self.decayed_stable_isotopes_per_cycle[i][self.index_for_stable_species[self.stable_isotope_list
[jj].upper()]],u.solar_abundance[self.stable_isotope_list[jj].lower()])))
jj=jj+1
self.isotopic_production_factors_decayed.append(pf_dum_d) | python | def get_abundance_iso_decay(self,cycle):
"""
returns the decayed stable isotopes.
Parameters
----------
cycle : integer
The cycle.
"""
import nuutils as u
masses_for_this_cycle = self.se.get(cycle,'mass')
self._read_iso_abund_marco([min(masses_for_this_cycle),max(masses_for_this_cycle)],cycle)
u.stable_specie()
self.decay(self.mass_frac)
self.index_for_all_species = u.cl
self.index_for_stable_species = u.back_ind
self.decayed_stable_isotopes_per_cycle = decayed_multi_d
# from here read solar abundances
solar_factor = 2.
u.solar('iniab1.0E-02.ppn_GN93',solar_factor)
self.stable_isotope_identifier=u.jjdum
self.stable_isotope_list=u.stable
self.isotopic_production_factors=[]
for i in range(len(masses_for_this_cycle)):
pf_dum=[]
jj=0
for j in range(len(self.stable_isotope_identifier)):
if self.stable_isotope_identifier[j] == 1:
pf_dum.append(float(old_div(self.mass_frac[i][self.index_for_all_species[self.stable_isotope_list
[jj].capitalize()]],u.solar_abundance[self.stable_isotope_list[jj].lower()])))
jj=jj+1
#elif self.stable_isotope_identifier[j] == 0:
# pf_dum.append(float(0.))
self.isotopic_production_factors.append(pf_dum)
self.isotopic_production_factors_decayed=[]
for i in range(len(masses_for_this_cycle)):
pf_dum_d=[]
jj=0
for j in range(len(self.stable_isotope_identifier)):
if self.stable_isotope_identifier[j] == 1:
pf_dum_d.append(float(old_div(self.decayed_stable_isotopes_per_cycle[i][self.index_for_stable_species[self.stable_isotope_list
[jj].upper()]],u.solar_abundance[self.stable_isotope_list[jj].lower()])))
jj=jj+1
self.isotopic_production_factors_decayed.append(pf_dum_d) | [
"def",
"get_abundance_iso_decay",
"(",
"self",
",",
"cycle",
")",
":",
"import",
"nuutils",
"as",
"u",
"masses_for_this_cycle",
"=",
"self",
".",
"se",
".",
"get",
"(",
"cycle",
",",
"'mass'",
")",
"self",
".",
"_read_iso_abund_marco",
"(",
"[",
"min",
"("... | returns the decayed stable isotopes.
Parameters
----------
cycle : integer
The cycle. | [
"returns",
"the",
"decayed",
"stable",
"isotopes",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/nugridse.py#L3864-L3917 | train | 41,630 |
openstack/proliantutils | proliantutils/redfish/resources/system/storage/simple_storage.py | SimpleStorage.maximum_size_bytes | def maximum_size_bytes(self):
"""Gets the biggest disk drive
:returns size in bytes.
"""
return utils.max_safe(
[device.get('CapacityBytes') for device in self.devices
if device.get('CapacityBytes') is not None]) | python | def maximum_size_bytes(self):
"""Gets the biggest disk drive
:returns size in bytes.
"""
return utils.max_safe(
[device.get('CapacityBytes') for device in self.devices
if device.get('CapacityBytes') is not None]) | [
"def",
"maximum_size_bytes",
"(",
"self",
")",
":",
"return",
"utils",
".",
"max_safe",
"(",
"[",
"device",
".",
"get",
"(",
"'CapacityBytes'",
")",
"for",
"device",
"in",
"self",
".",
"devices",
"if",
"device",
".",
"get",
"(",
"'CapacityBytes'",
")",
"... | Gets the biggest disk drive
:returns size in bytes. | [
"Gets",
"the",
"biggest",
"disk",
"drive"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/simple_storage.py#L39-L46 | train | 41,631 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem.push_power_button | def push_power_button(self, target_value):
"""Reset the system in hpe exclusive manner.
:param target_value: The target value to be set.
:raises: InvalidInputError, if the target value is not
allowed.
:raises: SushyError, on an error from iLO.
"""
if target_value not in mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV:
msg = ('The parameter "%(parameter)s" value "%(target_value)s" is '
'invalid. Valid values are: %(valid_power_values)s' %
{'parameter': 'target_value', 'target_value': target_value,
'valid_power_values': (
mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV.keys())})
raise exception.InvalidInputError(msg)
value = mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV[target_value]
target_uri = (
self._get_hpe_push_power_button_action_element().target_uri)
self._conn.post(target_uri, data={'PushType': value}) | python | def push_power_button(self, target_value):
"""Reset the system in hpe exclusive manner.
:param target_value: The target value to be set.
:raises: InvalidInputError, if the target value is not
allowed.
:raises: SushyError, on an error from iLO.
"""
if target_value not in mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV:
msg = ('The parameter "%(parameter)s" value "%(target_value)s" is '
'invalid. Valid values are: %(valid_power_values)s' %
{'parameter': 'target_value', 'target_value': target_value,
'valid_power_values': (
mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV.keys())})
raise exception.InvalidInputError(msg)
value = mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV[target_value]
target_uri = (
self._get_hpe_push_power_button_action_element().target_uri)
self._conn.post(target_uri, data={'PushType': value}) | [
"def",
"push_power_button",
"(",
"self",
",",
"target_value",
")",
":",
"if",
"target_value",
"not",
"in",
"mappings",
".",
"PUSH_POWER_BUTTON_VALUE_MAP_REV",
":",
"msg",
"=",
"(",
"'The parameter \"%(parameter)s\" value \"%(target_value)s\" is '",
"'invalid. Valid values are... | Reset the system in hpe exclusive manner.
:param target_value: The target value to be set.
:raises: InvalidInputError, if the target value is not
allowed.
:raises: SushyError, on an error from iLO. | [
"Reset",
"the",
"system",
"in",
"hpe",
"exclusive",
"manner",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L102-L122 | train | 41,632 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem.bios_settings | def bios_settings(self):
"""Property to provide reference to `BIOSSettings` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return bios.BIOSSettings(
self._conn, utils.get_subresource_path_by(self, 'Bios'),
redfish_version=self.redfish_version) | python | def bios_settings(self):
"""Property to provide reference to `BIOSSettings` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return bios.BIOSSettings(
self._conn, utils.get_subresource_path_by(self, 'Bios'),
redfish_version=self.redfish_version) | [
"def",
"bios_settings",
"(",
"self",
")",
":",
"return",
"bios",
".",
"BIOSSettings",
"(",
"self",
".",
"_conn",
",",
"utils",
".",
"get_subresource_path_by",
"(",
"self",
",",
"'Bios'",
")",
",",
"redfish_version",
"=",
"self",
".",
"redfish_version",
")"
] | Property to provide reference to `BIOSSettings` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | [
"Property",
"to",
"provide",
"reference",
"to",
"BIOSSettings",
"instance"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L126-L134 | train | 41,633 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem.secure_boot | def secure_boot(self):
"""Property to provide reference to `SecureBoot` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return secure_boot.SecureBoot(
self._conn, utils.get_subresource_path_by(self, 'SecureBoot'),
redfish_version=self.redfish_version) | python | def secure_boot(self):
"""Property to provide reference to `SecureBoot` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return secure_boot.SecureBoot(
self._conn, utils.get_subresource_path_by(self, 'SecureBoot'),
redfish_version=self.redfish_version) | [
"def",
"secure_boot",
"(",
"self",
")",
":",
"return",
"secure_boot",
".",
"SecureBoot",
"(",
"self",
".",
"_conn",
",",
"utils",
".",
"get_subresource_path_by",
"(",
"self",
",",
"'SecureBoot'",
")",
",",
"redfish_version",
"=",
"self",
".",
"redfish_version"... | Property to provide reference to `SecureBoot` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | [
"Property",
"to",
"provide",
"reference",
"to",
"SecureBoot",
"instance"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L191-L199 | train | 41,634 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem.ethernet_interfaces | def ethernet_interfaces(self):
"""Provide reference to EthernetInterfacesCollection instance"""
return ethernet_interface.EthernetInterfaceCollection(
self._conn,
self._get_hpe_sub_resource_collection_path('EthernetInterfaces'),
redfish_version=self.redfish_version) | python | def ethernet_interfaces(self):
"""Provide reference to EthernetInterfacesCollection instance"""
return ethernet_interface.EthernetInterfaceCollection(
self._conn,
self._get_hpe_sub_resource_collection_path('EthernetInterfaces'),
redfish_version=self.redfish_version) | [
"def",
"ethernet_interfaces",
"(",
"self",
")",
":",
"return",
"ethernet_interface",
".",
"EthernetInterfaceCollection",
"(",
"self",
".",
"_conn",
",",
"self",
".",
"_get_hpe_sub_resource_collection_path",
"(",
"'EthernetInterfaces'",
")",
",",
"redfish_version",
"=",
... | Provide reference to EthernetInterfacesCollection instance | [
"Provide",
"reference",
"to",
"EthernetInterfacesCollection",
"instance"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L212-L217 | train | 41,635 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem.smart_storage | def smart_storage(self):
"""This property gets the object for smart storage.
This property gets the object for smart storage.
There is no collection for smart storages.
:returns: an instance of smart storage
"""
return hpe_smart_storage.HPESmartStorage(
self._conn, utils.get_subresource_path_by(
self, ['Oem', 'Hpe', 'Links', 'SmartStorage']),
redfish_version=self.redfish_version) | python | def smart_storage(self):
"""This property gets the object for smart storage.
This property gets the object for smart storage.
There is no collection for smart storages.
:returns: an instance of smart storage
"""
return hpe_smart_storage.HPESmartStorage(
self._conn, utils.get_subresource_path_by(
self, ['Oem', 'Hpe', 'Links', 'SmartStorage']),
redfish_version=self.redfish_version) | [
"def",
"smart_storage",
"(",
"self",
")",
":",
"return",
"hpe_smart_storage",
".",
"HPESmartStorage",
"(",
"self",
".",
"_conn",
",",
"utils",
".",
"get_subresource_path_by",
"(",
"self",
",",
"[",
"'Oem'",
",",
"'Hpe'",
",",
"'Links'",
",",
"'SmartStorage'",
... | This property gets the object for smart storage.
This property gets the object for smart storage.
There is no collection for smart storages.
:returns: an instance of smart storage | [
"This",
"property",
"gets",
"the",
"object",
"for",
"smart",
"storage",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L221-L231 | train | 41,636 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem.storages | def storages(self):
"""This property gets the list of instances for Storages
This property gets the list of instances for Storages
:returns: a list of instances of Storages
"""
return storage.StorageCollection(
self._conn, utils.get_subresource_path_by(self, 'Storage'),
redfish_version=self.redfish_version) | python | def storages(self):
"""This property gets the list of instances for Storages
This property gets the list of instances for Storages
:returns: a list of instances of Storages
"""
return storage.StorageCollection(
self._conn, utils.get_subresource_path_by(self, 'Storage'),
redfish_version=self.redfish_version) | [
"def",
"storages",
"(",
"self",
")",
":",
"return",
"storage",
".",
"StorageCollection",
"(",
"self",
".",
"_conn",
",",
"utils",
".",
"get_subresource_path_by",
"(",
"self",
",",
"'Storage'",
")",
",",
"redfish_version",
"=",
"self",
".",
"redfish_version",
... | This property gets the list of instances for Storages
This property gets the list of instances for Storages
:returns: a list of instances of Storages | [
"This",
"property",
"gets",
"the",
"list",
"of",
"instances",
"for",
"Storages"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L235-L243 | train | 41,637 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem.simple_storages | def simple_storages(self):
"""This property gets the list of instances for SimpleStorages
:returns: a list of instances of SimpleStorages
"""
return simple_storage.SimpleStorageCollection(
self._conn, utils.get_subresource_path_by(self, 'SimpleStorage'),
redfish_version=self.redfish_version) | python | def simple_storages(self):
"""This property gets the list of instances for SimpleStorages
:returns: a list of instances of SimpleStorages
"""
return simple_storage.SimpleStorageCollection(
self._conn, utils.get_subresource_path_by(self, 'SimpleStorage'),
redfish_version=self.redfish_version) | [
"def",
"simple_storages",
"(",
"self",
")",
":",
"return",
"simple_storage",
".",
"SimpleStorageCollection",
"(",
"self",
".",
"_conn",
",",
"utils",
".",
"get_subresource_path_by",
"(",
"self",
",",
"'SimpleStorage'",
")",
",",
"redfish_version",
"=",
"self",
"... | This property gets the list of instances for SimpleStorages
:returns: a list of instances of SimpleStorages | [
"This",
"property",
"gets",
"the",
"list",
"of",
"instances",
"for",
"SimpleStorages"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L247-L254 | train | 41,638 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem.memory | def memory(self):
"""Property to provide reference to `MemoryCollection` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return memory.MemoryCollection(
self._conn, utils.get_subresource_path_by(self, 'Memory'),
redfish_version=self.redfish_version) | python | def memory(self):
"""Property to provide reference to `MemoryCollection` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return memory.MemoryCollection(
self._conn, utils.get_subresource_path_by(self, 'Memory'),
redfish_version=self.redfish_version) | [
"def",
"memory",
"(",
"self",
")",
":",
"return",
"memory",
".",
"MemoryCollection",
"(",
"self",
".",
"_conn",
",",
"utils",
".",
"get_subresource_path_by",
"(",
"self",
",",
"'Memory'",
")",
",",
"redfish_version",
"=",
"self",
".",
"redfish_version",
")"
... | Property to provide reference to `MemoryCollection` instance
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | [
"Property",
"to",
"provide",
"reference",
"to",
"MemoryCollection",
"instance"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L258-L266 | train | 41,639 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem.get_smart_storage_config | def get_smart_storage_config(self, smart_storage_config_url):
"""Returns a SmartStorageConfig Instance for each controller."""
return (smart_storage_config.
HPESmartStorageConfig(self._conn, smart_storage_config_url,
redfish_version=self.redfish_version)) | python | def get_smart_storage_config(self, smart_storage_config_url):
"""Returns a SmartStorageConfig Instance for each controller."""
return (smart_storage_config.
HPESmartStorageConfig(self._conn, smart_storage_config_url,
redfish_version=self.redfish_version)) | [
"def",
"get_smart_storage_config",
"(",
"self",
",",
"smart_storage_config_url",
")",
":",
"return",
"(",
"smart_storage_config",
".",
"HPESmartStorageConfig",
"(",
"self",
".",
"_conn",
",",
"smart_storage_config_url",
",",
"redfish_version",
"=",
"self",
".",
"redfi... | Returns a SmartStorageConfig Instance for each controller. | [
"Returns",
"a",
"SmartStorageConfig",
"Instance",
"for",
"each",
"controller",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L268-L272 | train | 41,640 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem._get_smart_storage_config_by_controller_model | def _get_smart_storage_config_by_controller_model(self, controller_model):
"""Returns a SmartStorageConfig Instance for controller by model.
:returns: SmartStorageConfig Instance for controller
"""
ac = self.smart_storage.array_controllers.array_controller_by_model(
controller_model)
if ac:
for ssc_id in self.smart_storage_config_identities:
ssc_obj = self.get_smart_storage_config(ssc_id)
if ac.location == ssc_obj.location:
return ssc_obj | python | def _get_smart_storage_config_by_controller_model(self, controller_model):
"""Returns a SmartStorageConfig Instance for controller by model.
:returns: SmartStorageConfig Instance for controller
"""
ac = self.smart_storage.array_controllers.array_controller_by_model(
controller_model)
if ac:
for ssc_id in self.smart_storage_config_identities:
ssc_obj = self.get_smart_storage_config(ssc_id)
if ac.location == ssc_obj.location:
return ssc_obj | [
"def",
"_get_smart_storage_config_by_controller_model",
"(",
"self",
",",
"controller_model",
")",
":",
"ac",
"=",
"self",
".",
"smart_storage",
".",
"array_controllers",
".",
"array_controller_by_model",
"(",
"controller_model",
")",
"if",
"ac",
":",
"for",
"ssc_id",... | Returns a SmartStorageConfig Instance for controller by model.
:returns: SmartStorageConfig Instance for controller | [
"Returns",
"a",
"SmartStorageConfig",
"Instance",
"for",
"controller",
"by",
"model",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L274-L285 | train | 41,641 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem.check_smart_storage_config_ids | def check_smart_storage_config_ids(self):
"""Check SmartStorageConfig controllers is there in hardware.
:raises: IloError, on an error from iLO.
"""
if self.smart_storage_config_identities is None:
msg = ('The Redfish controller failed to get the '
'SmartStorageConfig controller configurations.')
LOG.debug(msg)
raise exception.IloError(msg) | python | def check_smart_storage_config_ids(self):
"""Check SmartStorageConfig controllers is there in hardware.
:raises: IloError, on an error from iLO.
"""
if self.smart_storage_config_identities is None:
msg = ('The Redfish controller failed to get the '
'SmartStorageConfig controller configurations.')
LOG.debug(msg)
raise exception.IloError(msg) | [
"def",
"check_smart_storage_config_ids",
"(",
"self",
")",
":",
"if",
"self",
".",
"smart_storage_config_identities",
"is",
"None",
":",
"msg",
"=",
"(",
"'The Redfish controller failed to get the '",
"'SmartStorageConfig controller configurations.'",
")",
"LOG",
".",
"debu... | Check SmartStorageConfig controllers is there in hardware.
:raises: IloError, on an error from iLO. | [
"Check",
"SmartStorageConfig",
"controllers",
"is",
"there",
"in",
"hardware",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L287-L296 | train | 41,642 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem.delete_raid | def delete_raid(self):
"""Delete the raid configuration on the hardware.
Loops through each SmartStorageConfig controller and clears the
raid configuration.
:raises: IloError, on an error from iLO.
"""
self.check_smart_storage_config_ids()
any_exceptions = []
ld_exc_count = 0
for config_id in self.smart_storage_config_identities:
try:
ssc_obj = self.get_smart_storage_config(config_id)
ssc_obj.delete_raid()
except exception.IloLogicalDriveNotFoundError as e:
ld_exc_count += 1
except sushy.exceptions.SushyError as e:
any_exceptions.append((config_id, str(e)))
if any_exceptions:
msg = ('The Redfish controller failed to delete the '
'raid configuration in one or more controllers with '
'Error: %(error)s' % {'error': str(any_exceptions)})
raise exception.IloError(msg)
if ld_exc_count == len(self.smart_storage_config_identities):
msg = ('No logical drives are found in any controllers. Nothing '
'to delete.')
raise exception.IloLogicalDriveNotFoundError(msg) | python | def delete_raid(self):
"""Delete the raid configuration on the hardware.
Loops through each SmartStorageConfig controller and clears the
raid configuration.
:raises: IloError, on an error from iLO.
"""
self.check_smart_storage_config_ids()
any_exceptions = []
ld_exc_count = 0
for config_id in self.smart_storage_config_identities:
try:
ssc_obj = self.get_smart_storage_config(config_id)
ssc_obj.delete_raid()
except exception.IloLogicalDriveNotFoundError as e:
ld_exc_count += 1
except sushy.exceptions.SushyError as e:
any_exceptions.append((config_id, str(e)))
if any_exceptions:
msg = ('The Redfish controller failed to delete the '
'raid configuration in one or more controllers with '
'Error: %(error)s' % {'error': str(any_exceptions)})
raise exception.IloError(msg)
if ld_exc_count == len(self.smart_storage_config_identities):
msg = ('No logical drives are found in any controllers. Nothing '
'to delete.')
raise exception.IloLogicalDriveNotFoundError(msg) | [
"def",
"delete_raid",
"(",
"self",
")",
":",
"self",
".",
"check_smart_storage_config_ids",
"(",
")",
"any_exceptions",
"=",
"[",
"]",
"ld_exc_count",
"=",
"0",
"for",
"config_id",
"in",
"self",
".",
"smart_storage_config_identities",
":",
"try",
":",
"ssc_obj",... | Delete the raid configuration on the hardware.
Loops through each SmartStorageConfig controller and clears the
raid configuration.
:raises: IloError, on an error from iLO. | [
"Delete",
"the",
"raid",
"configuration",
"on",
"the",
"hardware",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L298-L327 | train | 41,643 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem._parse_raid_config_data | def _parse_raid_config_data(self, raid_config):
"""It will parse raid config data based on raid controllers
:param raid_config: A dictionary containing target raid configuration
data. This data stucture should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'controller':
'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:returns: A dictionary of controllers, each containing list of
their respected logical drives.
"""
default = (
self.smart_storage.array_controllers.get_default_controller.model)
controllers = {default: []}
for ld in raid_config['logical_disks']:
if 'controller' not in ld.keys():
controllers[default].append(ld)
else:
ctrl = ld['controller']
if ctrl not in controllers:
controllers[ctrl] = []
controllers[ctrl].append(ld)
return controllers | python | def _parse_raid_config_data(self, raid_config):
"""It will parse raid config data based on raid controllers
:param raid_config: A dictionary containing target raid configuration
data. This data stucture should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'controller':
'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:returns: A dictionary of controllers, each containing list of
their respected logical drives.
"""
default = (
self.smart_storage.array_controllers.get_default_controller.model)
controllers = {default: []}
for ld in raid_config['logical_disks']:
if 'controller' not in ld.keys():
controllers[default].append(ld)
else:
ctrl = ld['controller']
if ctrl not in controllers:
controllers[ctrl] = []
controllers[ctrl].append(ld)
return controllers | [
"def",
"_parse_raid_config_data",
"(",
"self",
",",
"raid_config",
")",
":",
"default",
"=",
"(",
"self",
".",
"smart_storage",
".",
"array_controllers",
".",
"get_default_controller",
".",
"model",
")",
"controllers",
"=",
"{",
"default",
":",
"[",
"]",
"}",
... | It will parse raid config data based on raid controllers
:param raid_config: A dictionary containing target raid configuration
data. This data stucture should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'controller':
'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:returns: A dictionary of controllers, each containing list of
their respected logical drives. | [
"It",
"will",
"parse",
"raid",
"config",
"data",
"based",
"on",
"raid",
"controllers"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L329-L352 | train | 41,644 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem._post_create_read_raid | def _post_create_read_raid(self, raid_config):
"""Read the logical drives from the system after post-create raid
:param raid_config: A dictionary containing target raid configuration
data. This data stucture should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'physical_disks': ['6I:1:5'],
'controller': 'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:raises: IloLogicalDriveNotFoundError, if no controllers are configured
:raises: IloError, if any error form iLO
:returns: A dictionary containing list of logical disks
"""
controllers = self._parse_raid_config_data(raid_config)
ld_exc_count = 0
any_exceptions = []
config = {'logical_disks': []}
for controller in controllers:
try:
ssc_obj = (
self._get_smart_storage_config_by_controller_model(
controller))
if ssc_obj:
result = ssc_obj.read_raid(controller=controller)
config['logical_disks'].extend(result['logical_disks'])
except exception.IloLogicalDriveNotFoundError as e:
ld_exc_count += 1
except sushy.exceptions.SushyError as e:
any_exceptions.append((controller, str(e)))
if ld_exc_count == len(controllers):
msg = 'No logical drives are found in any controllers.'
raise exception.IloLogicalDriveNotFoundError(msg)
if any_exceptions:
msg = ('The Redfish controller failed to read the '
'raid configuration in one or more controllers with '
'Error: %(error)s' % {'error': str(any_exceptions)})
raise exception.IloError(msg)
return config | python | def _post_create_read_raid(self, raid_config):
"""Read the logical drives from the system after post-create raid
:param raid_config: A dictionary containing target raid configuration
data. This data stucture should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'physical_disks': ['6I:1:5'],
'controller': 'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:raises: IloLogicalDriveNotFoundError, if no controllers are configured
:raises: IloError, if any error form iLO
:returns: A dictionary containing list of logical disks
"""
controllers = self._parse_raid_config_data(raid_config)
ld_exc_count = 0
any_exceptions = []
config = {'logical_disks': []}
for controller in controllers:
try:
ssc_obj = (
self._get_smart_storage_config_by_controller_model(
controller))
if ssc_obj:
result = ssc_obj.read_raid(controller=controller)
config['logical_disks'].extend(result['logical_disks'])
except exception.IloLogicalDriveNotFoundError as e:
ld_exc_count += 1
except sushy.exceptions.SushyError as e:
any_exceptions.append((controller, str(e)))
if ld_exc_count == len(controllers):
msg = 'No logical drives are found in any controllers.'
raise exception.IloLogicalDriveNotFoundError(msg)
if any_exceptions:
msg = ('The Redfish controller failed to read the '
'raid configuration in one or more controllers with '
'Error: %(error)s' % {'error': str(any_exceptions)})
raise exception.IloError(msg)
return config | [
"def",
"_post_create_read_raid",
"(",
"self",
",",
"raid_config",
")",
":",
"controllers",
"=",
"self",
".",
"_parse_raid_config_data",
"(",
"raid_config",
")",
"ld_exc_count",
"=",
"0",
"any_exceptions",
"=",
"[",
"]",
"config",
"=",
"{",
"'logical_disks'",
":"... | Read the logical drives from the system after post-create raid
:param raid_config: A dictionary containing target raid configuration
data. This data stucture should be as follows:
raid_config = {'logical_disks': [{'raid_level': 1,
'size_gb': 100, 'physical_disks': ['6I:1:5'],
'controller': 'HPE Smart Array P408i-a SR Gen10'},
<info-for-logical-disk-2>]}
:raises: IloLogicalDriveNotFoundError, if no controllers are configured
:raises: IloError, if any error form iLO
:returns: A dictionary containing list of logical disks | [
"Read",
"the",
"logical",
"drives",
"from",
"the",
"system",
"after",
"post",
"-",
"create",
"raid"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L393-L431 | train | 41,645 |
openstack/proliantutils | proliantutils/redfish/resources/system/system.py | HPESystem._post_delete_read_raid | def _post_delete_read_raid(self):
"""Read the logical drives from the system after post-delete raid
:raises: IloError, if any error form iLO
:returns: Empty dictionary with format: {'logical_disks': []}
"""
any_exceptions = []
ssc_ids = self.smart_storage_config_identities
config = {'logical_disks': []}
for ssc_id in ssc_ids:
try:
ssc_obj = self.get_smart_storage_config(ssc_id)
ac_obj = (
self.smart_storage.array_controllers.
array_controller_by_location(ssc_obj.location))
if ac_obj:
model = ac_obj.model
result = ssc_obj.read_raid()
if result:
config['logical_disks'].extend(result['logical_disks'])
except sushy.exceptions.SushyError as e:
any_exceptions.append((model, str(e)))
if any_exceptions:
msg = ('The Redfish controller failed to read the '
'raid configuration in one or more controllers with '
'Error: %(error)s' % {'error': str(any_exceptions)})
raise exception.IloError(msg)
return config | python | def _post_delete_read_raid(self):
"""Read the logical drives from the system after post-delete raid
:raises: IloError, if any error form iLO
:returns: Empty dictionary with format: {'logical_disks': []}
"""
any_exceptions = []
ssc_ids = self.smart_storage_config_identities
config = {'logical_disks': []}
for ssc_id in ssc_ids:
try:
ssc_obj = self.get_smart_storage_config(ssc_id)
ac_obj = (
self.smart_storage.array_controllers.
array_controller_by_location(ssc_obj.location))
if ac_obj:
model = ac_obj.model
result = ssc_obj.read_raid()
if result:
config['logical_disks'].extend(result['logical_disks'])
except sushy.exceptions.SushyError as e:
any_exceptions.append((model, str(e)))
if any_exceptions:
msg = ('The Redfish controller failed to read the '
'raid configuration in one or more controllers with '
'Error: %(error)s' % {'error': str(any_exceptions)})
raise exception.IloError(msg)
return config | [
"def",
"_post_delete_read_raid",
"(",
"self",
")",
":",
"any_exceptions",
"=",
"[",
"]",
"ssc_ids",
"=",
"self",
".",
"smart_storage_config_identities",
"config",
"=",
"{",
"'logical_disks'",
":",
"[",
"]",
"}",
"for",
"ssc_id",
"in",
"ssc_ids",
":",
"try",
... | Read the logical drives from the system after post-delete raid
:raises: IloError, if any error form iLO
:returns: Empty dictionary with format: {'logical_disks': []} | [
"Read",
"the",
"logical",
"drives",
"from",
"the",
"system",
"after",
"post",
"-",
"delete",
"raid"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/system.py#L433-L461 | train | 41,646 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/table/_deserialization.py | _convert_json_to_entity | def _convert_json_to_entity(entry_element, property_resolver):
''' Convert json response to entity.
The entity format is:
{
"Address":"Mountain View",
"Age":23,
"AmountDue":200.23,
"CustomerCode@odata.type":"Edm.Guid",
"CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833",
"CustomerSince@odata.type":"Edm.DateTime",
"CustomerSince":"2008-07-10T00:00:00",
"IsActive":true,
"NumberOfOrders@odata.type":"Edm.Int64",
"NumberOfOrders":"255",
"PartitionKey":"mypartitionkey",
"RowKey":"myrowkey"
}
'''
entity = Entity()
properties = {}
edmtypes = {}
odata = {}
for name, value in entry_element.items():
if name.startswith('odata.'):
odata[name[6:]] = value
elif name.endswith('@odata.type'):
edmtypes[name[:-11]] = value
else:
properties[name] = value
# Partition key is a known property
partition_key = properties.pop('PartitionKey', None)
if partition_key:
entity['PartitionKey'] = partition_key
# Row key is a known property
row_key = properties.pop('RowKey', None)
if row_key:
entity['RowKey'] = row_key
# Timestamp is a known property
timestamp = properties.pop('Timestamp', None)
if timestamp:
entity['Timestamp'] = _from_entity_datetime(timestamp)
for name, value in properties.items():
mtype = edmtypes.get(name);
# use the property resolver if present
if property_resolver:
mtype = property_resolver(partition_key, row_key,
name, value, mtype)
# throw if the type returned is not a valid edm type
if mtype and mtype not in _EDM_TYPES:
raise AzureException(_ERROR_TYPE_NOT_SUPPORTED.format(mtype))
# Add type for Int32
if type(value) is int:
mtype = EdmType.INT32
# no type info, property should parse automatically
if not mtype:
entity[name] = value
else: # need an object to hold the property
conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)
if conv is not None:
try:
property = conv(value)
except Exception as e:
# throw if the type returned by the property resolver
# cannot be used in the conversion
if property_resolver:
raise AzureException(
_ERROR_INVALID_PROPERTY_RESOLVER.format(name, value, mtype))
else:
raise e
else:
property = EntityProperty(mtype, value)
entity[name] = property
# extract etag from entry
etag = odata.get('etag')
if timestamp:
etag = 'W/"datetime\'' + url_quote(timestamp) + '\'"'
entity['etag'] = etag
return entity | python | def _convert_json_to_entity(entry_element, property_resolver):
''' Convert json response to entity.
The entity format is:
{
"Address":"Mountain View",
"Age":23,
"AmountDue":200.23,
"CustomerCode@odata.type":"Edm.Guid",
"CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833",
"CustomerSince@odata.type":"Edm.DateTime",
"CustomerSince":"2008-07-10T00:00:00",
"IsActive":true,
"NumberOfOrders@odata.type":"Edm.Int64",
"NumberOfOrders":"255",
"PartitionKey":"mypartitionkey",
"RowKey":"myrowkey"
}
'''
entity = Entity()
properties = {}
edmtypes = {}
odata = {}
for name, value in entry_element.items():
if name.startswith('odata.'):
odata[name[6:]] = value
elif name.endswith('@odata.type'):
edmtypes[name[:-11]] = value
else:
properties[name] = value
# Partition key is a known property
partition_key = properties.pop('PartitionKey', None)
if partition_key:
entity['PartitionKey'] = partition_key
# Row key is a known property
row_key = properties.pop('RowKey', None)
if row_key:
entity['RowKey'] = row_key
# Timestamp is a known property
timestamp = properties.pop('Timestamp', None)
if timestamp:
entity['Timestamp'] = _from_entity_datetime(timestamp)
for name, value in properties.items():
mtype = edmtypes.get(name);
# use the property resolver if present
if property_resolver:
mtype = property_resolver(partition_key, row_key,
name, value, mtype)
# throw if the type returned is not a valid edm type
if mtype and mtype not in _EDM_TYPES:
raise AzureException(_ERROR_TYPE_NOT_SUPPORTED.format(mtype))
# Add type for Int32
if type(value) is int:
mtype = EdmType.INT32
# no type info, property should parse automatically
if not mtype:
entity[name] = value
else: # need an object to hold the property
conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)
if conv is not None:
try:
property = conv(value)
except Exception as e:
# throw if the type returned by the property resolver
# cannot be used in the conversion
if property_resolver:
raise AzureException(
_ERROR_INVALID_PROPERTY_RESOLVER.format(name, value, mtype))
else:
raise e
else:
property = EntityProperty(mtype, value)
entity[name] = property
# extract etag from entry
etag = odata.get('etag')
if timestamp:
etag = 'W/"datetime\'' + url_quote(timestamp) + '\'"'
entity['etag'] = etag
return entity | [
"def",
"_convert_json_to_entity",
"(",
"entry_element",
",",
"property_resolver",
")",
":",
"entity",
"=",
"Entity",
"(",
")",
"properties",
"=",
"{",
"}",
"edmtypes",
"=",
"{",
"}",
"odata",
"=",
"{",
"}",
"for",
"name",
",",
"value",
"in",
"entry_element... | Convert json response to entity.
The entity format is:
{
"Address":"Mountain View",
"Age":23,
"AmountDue":200.23,
"CustomerCode@odata.type":"Edm.Guid",
"CustomerCode":"c9da6455-213d-42c9-9a79-3e9149a57833",
"CustomerSince@odata.type":"Edm.DateTime",
"CustomerSince":"2008-07-10T00:00:00",
"IsActive":true,
"NumberOfOrders@odata.type":"Edm.Int64",
"NumberOfOrders":"255",
"PartitionKey":"mypartitionkey",
"RowKey":"myrowkey"
} | [
"Convert",
"json",
"response",
"to",
"entity",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/table/_deserialization.py#L95-L185 | train | 41,647 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/table/_deserialization.py | _extract_etag | def _extract_etag(response):
''' Extracts the etag from the response headers. '''
if response and response.headers:
for name, value in response.headers:
if name.lower() == 'etag':
return value
return None | python | def _extract_etag(response):
''' Extracts the etag from the response headers. '''
if response and response.headers:
for name, value in response.headers:
if name.lower() == 'etag':
return value
return None | [
"def",
"_extract_etag",
"(",
"response",
")",
":",
"if",
"response",
"and",
"response",
".",
"headers",
":",
"for",
"name",
",",
"value",
"in",
"response",
".",
"headers",
":",
"if",
"name",
".",
"lower",
"(",
")",
"==",
"'etag'",
":",
"return",
"value... | Extracts the etag from the response headers. | [
"Extracts",
"the",
"etag",
"from",
"the",
"response",
"headers",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/table/_deserialization.py#L236-L243 | train | 41,648 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/sharedaccesssignature.py | SharedAccessSignature.generate_blob | def generate_blob(self, container_name, blob_name, permission=None,
expiry=None, start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the blob.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param str blob_name:
Name of blob.
:param BlobPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
resource_path = container_name + '/' + blob_name
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('b')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path)
return sas.get_token() | python | def generate_blob(self, container_name, blob_name, permission=None,
expiry=None, start=None, id=None, ip=None, protocol=None,
cache_control=None, content_disposition=None,
content_encoding=None, content_language=None,
content_type=None):
'''
Generates a shared access signature for the blob.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param str blob_name:
Name of blob.
:param BlobPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature.
'''
resource_path = container_name + '/' + blob_name
sas = _SharedAccessHelper()
sas.add_base(permission, expiry, start, ip, protocol)
sas.add_id(id)
sas.add_resource('b')
sas.add_override_response_headers(cache_control, content_disposition,
content_encoding, content_language,
content_type)
sas.add_resource_signature(self.account_name, self.account_key, 'blob', resource_path)
return sas.get_token() | [
"def",
"generate_blob",
"(",
"self",
",",
"container_name",
",",
"blob_name",
",",
"permission",
"=",
"None",
",",
"expiry",
"=",
"None",
",",
"start",
"=",
"None",
",",
"id",
"=",
"None",
",",
"ip",
"=",
"None",
",",
"protocol",
"=",
"None",
",",
"c... | Generates a shared access signature for the blob.
Use the returned signature with the sas_token parameter of any BlobService.
:param str container_name:
Name of container.
:param str blob_name:
Name of blob.
:param BlobPermissions permission:
The permissions associated with the shared access signature. The
user is restricted to operations allowed by the permissions.
Permissions must be ordered read, write, delete, list.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has been
specified in an associated stored access policy.
:param expiry:
The time at which the shared access signature becomes invalid.
Required unless an id is given referencing a stored access policy
which contains this field. This field must be omitted if it has
been specified in an associated stored access policy. Azure will always
convert values to UTC. If a date is passed in without timezone info, it
is assumed to be UTC.
:type expiry: date or str
:param start:
The time at which the shared access signature becomes valid. If
omitted, start time for this call is assumed to be the time when the
storage service receives the request. Azure will always convert values
to UTC. If a date is passed in without timezone info, it is assumed to
be UTC.
:type start: date or str
:param str id:
A unique value up to 64 characters in length that correlates to a
stored access policy. To create a stored access policy, use
set_blob_service_properties.
:param str ip:
Specifies an IP address or a range of IP addresses from which to accept requests.
If the IP address from which the request originates does not match the IP address
or address range specified on the SAS token, the request is not authenticated.
For example, specifying sip=168.1.5.65 or sip=168.1.5.60-168.1.5.70 on the SAS
restricts the request to those IP addresses.
:param str protocol:
Specifies the protocol permitted for a request made. The default value
is https,http. See :class:`~azure.storage.models.Protocol` for possible values.
:param str cache_control:
Response header value for Cache-Control when resource is accessed
using this shared access signature.
:param str content_disposition:
Response header value for Content-Disposition when resource is accessed
using this shared access signature.
:param str content_encoding:
Response header value for Content-Encoding when resource is accessed
using this shared access signature.
:param str content_language:
Response header value for Content-Language when resource is accessed
using this shared access signature.
:param str content_type:
Response header value for Content-Type when resource is accessed
using this shared access signature. | [
"Generates",
"a",
"shared",
"access",
"signature",
"for",
"the",
"blob",
".",
"Use",
"the",
"returned",
"signature",
"with",
"the",
"sas_token",
"parameter",
"of",
"any",
"BlobService",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/sharedaccesssignature.py#L170-L245 | train | 41,649 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/table/tableservice.py | TableService.create_table | def create_table(self, table_name, fail_on_exist=False, timeout=None):
'''
Creates a new table in the storage account.
:param str table_name:
The name of the table to create. The table name may contain only
alphanumeric characters and cannot begin with a numeric character.
It is case-insensitive and must be from 3 to 63 characters long.
:param bool fail_on_exist:
Specifies whether to throw an exception if the table already exists.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A boolean indicating whether the table was created. If fail_on_exist
was set to True, this will throw instead of returning false.
:rtype: bool
'''
_validate_not_none('table', table_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/Tables'
request.query = [('timeout', _int_to_str(timeout))]
request.headers = [_DEFAULT_CONTENT_TYPE_HEADER,
_DEFAULT_PREFER_HEADER,
_DEFAULT_ACCEPT_HEADER]
request.body = _get_request_body(_convert_table_to_json(table_name))
if not fail_on_exist:
try:
self._perform_request(request)
return True
except AzureHttpError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True | python | def create_table(self, table_name, fail_on_exist=False, timeout=None):
'''
Creates a new table in the storage account.
:param str table_name:
The name of the table to create. The table name may contain only
alphanumeric characters and cannot begin with a numeric character.
It is case-insensitive and must be from 3 to 63 characters long.
:param bool fail_on_exist:
Specifies whether to throw an exception if the table already exists.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A boolean indicating whether the table was created. If fail_on_exist
was set to True, this will throw instead of returning false.
:rtype: bool
'''
_validate_not_none('table', table_name)
request = HTTPRequest()
request.method = 'POST'
request.host = self._get_host()
request.path = '/Tables'
request.query = [('timeout', _int_to_str(timeout))]
request.headers = [_DEFAULT_CONTENT_TYPE_HEADER,
_DEFAULT_PREFER_HEADER,
_DEFAULT_ACCEPT_HEADER]
request.body = _get_request_body(_convert_table_to_json(table_name))
if not fail_on_exist:
try:
self._perform_request(request)
return True
except AzureHttpError as ex:
_dont_fail_on_exist(ex)
return False
else:
self._perform_request(request)
return True | [
"def",
"create_table",
"(",
"self",
",",
"table_name",
",",
"fail_on_exist",
"=",
"False",
",",
"timeout",
"=",
"None",
")",
":",
"_validate_not_none",
"(",
"'table'",
",",
"table_name",
")",
"request",
"=",
"HTTPRequest",
"(",
")",
"request",
".",
"method",... | Creates a new table in the storage account.
:param str table_name:
The name of the table to create. The table name may contain only
alphanumeric characters and cannot begin with a numeric character.
It is case-insensitive and must be from 3 to 63 characters long.
:param bool fail_on_exist:
Specifies whether to throw an exception if the table already exists.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A boolean indicating whether the table was created. If fail_on_exist
was set to True, this will throw instead of returning false.
:rtype: bool | [
"Creates",
"a",
"new",
"table",
"in",
"the",
"storage",
"account",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/table/tableservice.py#L421-L458 | train | 41,650 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/table/tableservice.py | TableService.exists | def exists(self, table_name, timeout=None):
'''
Returns a boolean indicating whether the table exists.
:param str table_name:
The name of table to check for existence.
:param int timeout:
The server timeout, expressed in seconds.
:return: A boolean indicating whether the table exists.
:rtype: bool
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/Tables' + "('" + table_name + "')"
request.headers = [('Accept', TablePayloadFormat.JSON_NO_METADATA)]
request.query = [('timeout', _int_to_str(timeout))]
try:
self._perform_request(request)
return True
except AzureHttpError as ex:
_dont_fail_not_exist(ex)
return False | python | def exists(self, table_name, timeout=None):
'''
Returns a boolean indicating whether the table exists.
:param str table_name:
The name of table to check for existence.
:param int timeout:
The server timeout, expressed in seconds.
:return: A boolean indicating whether the table exists.
:rtype: bool
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'GET'
request.host = self._get_host()
request.path = '/Tables' + "('" + table_name + "')"
request.headers = [('Accept', TablePayloadFormat.JSON_NO_METADATA)]
request.query = [('timeout', _int_to_str(timeout))]
try:
self._perform_request(request)
return True
except AzureHttpError as ex:
_dont_fail_not_exist(ex)
return False | [
"def",
"exists",
"(",
"self",
",",
"table_name",
",",
"timeout",
"=",
"None",
")",
":",
"_validate_not_none",
"(",
"'table_name'",
",",
"table_name",
")",
"request",
"=",
"HTTPRequest",
"(",
")",
"request",
".",
"method",
"=",
"'GET'",
"request",
".",
"hos... | Returns a boolean indicating whether the table exists.
:param str table_name:
The name of table to check for existence.
:param int timeout:
The server timeout, expressed in seconds.
:return: A boolean indicating whether the table exists.
:rtype: bool | [
"Returns",
"a",
"boolean",
"indicating",
"whether",
"the",
"table",
"exists",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/table/tableservice.py#L460-L484 | train | 41,651 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/table/tableservice.py | TableService.query_entities | def query_entities(self, table_name, filter=None, select=None, num_results=None,
marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
property_resolver=None, timeout=None):
'''
Returns a generator to list the entities in the table specified. The
generator will lazily follow the continuation tokens returned by the
service and stop when all entities have been returned or max_results is
reached.
If max_results is specified and the account has more than that number of
entities, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int num_results:
The maximum number of entities to return.
:param marker:
An opaque continuation object. This value can be retrieved from the
next_marker field of a previous generator object if max_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:type marker: obj
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds. This function may make multiple
calls to the service in which case the timeout value specified will be
applied to each individual call.
:return: A generator which produces :class:`~azure.storage.table.models.Entity` objects.
:rtype: :class:`~azure.storage.models.ListGenerator`
'''
args = (table_name,)
kwargs = {'filter': filter, 'select': select, 'max_results': num_results, 'marker': marker,
'accept': accept, 'property_resolver': property_resolver, 'timeout': timeout}
resp = self._query_entities(*args, **kwargs)
return ListGenerator(resp, self._query_entities, args, kwargs) | python | def query_entities(self, table_name, filter=None, select=None, num_results=None,
marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
property_resolver=None, timeout=None):
'''
Returns a generator to list the entities in the table specified. The
generator will lazily follow the continuation tokens returned by the
service and stop when all entities have been returned or max_results is
reached.
If max_results is specified and the account has more than that number of
entities, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int num_results:
The maximum number of entities to return.
:param marker:
An opaque continuation object. This value can be retrieved from the
next_marker field of a previous generator object if max_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:type marker: obj
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds. This function may make multiple
calls to the service in which case the timeout value specified will be
applied to each individual call.
:return: A generator which produces :class:`~azure.storage.table.models.Entity` objects.
:rtype: :class:`~azure.storage.models.ListGenerator`
'''
args = (table_name,)
kwargs = {'filter': filter, 'select': select, 'max_results': num_results, 'marker': marker,
'accept': accept, 'property_resolver': property_resolver, 'timeout': timeout}
resp = self._query_entities(*args, **kwargs)
return ListGenerator(resp, self._query_entities, args, kwargs) | [
"def",
"query_entities",
"(",
"self",
",",
"table_name",
",",
"filter",
"=",
"None",
",",
"select",
"=",
"None",
",",
"num_results",
"=",
"None",
",",
"marker",
"=",
"None",
",",
"accept",
"=",
"TablePayloadFormat",
".",
"JSON_MINIMAL_METADATA",
",",
"proper... | Returns a generator to list the entities in the table specified. The
generator will lazily follow the continuation tokens returned by the
service and stop when all entities have been returned or max_results is
reached.
If max_results is specified and the account has more than that number of
entities, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int num_results:
The maximum number of entities to return.
:param marker:
An opaque continuation object. This value can be retrieved from the
next_marker field of a previous generator object if max_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:type marker: obj
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: callback function in format of func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds. This function may make multiple
calls to the service in which case the timeout value specified will be
applied to each individual call.
:return: A generator which produces :class:`~azure.storage.table.models.Entity` objects.
:rtype: :class:`~azure.storage.models.ListGenerator` | [
"Returns",
"a",
"generator",
"to",
"list",
"the",
"entities",
"in",
"the",
"table",
"specified",
".",
"The",
"generator",
"will",
"lazily",
"follow",
"the",
"continuation",
"tokens",
"returned",
"by",
"the",
"service",
"and",
"stop",
"when",
"all",
"entities",... | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/table/tableservice.py#L593-L647 | train | 41,652 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/_http/batchclient.py | _BatchClient.get_request_partition_key | def get_request_partition_key(self, request):
'''
Extracts PartitionKey from request.body if it is a POST request or from
request.path if it is not a POST request. Only insert operation request
is a POST request and the PartitionKey is in the request body.
request:
the request to insert, update or delete entity
'''
if request.method == 'POST':
doc = ETree.fromstring(request.body)
part_key = doc.find('./atom:content/m:properties/d:PartitionKey', _etree_entity_feed_namespaces)
if part_key is None:
raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY)
return _get_etree_text(part_key)
else:
uri = url_unquote(request.path)
pos1 = uri.find('PartitionKey=\'')
pos2 = uri.find('\',', pos1)
if pos1 == -1 or pos2 == -1:
raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY)
return uri[pos1 + len('PartitionKey=\''):pos2] | python | def get_request_partition_key(self, request):
'''
Extracts PartitionKey from request.body if it is a POST request or from
request.path if it is not a POST request. Only insert operation request
is a POST request and the PartitionKey is in the request body.
request:
the request to insert, update or delete entity
'''
if request.method == 'POST':
doc = ETree.fromstring(request.body)
part_key = doc.find('./atom:content/m:properties/d:PartitionKey', _etree_entity_feed_namespaces)
if part_key is None:
raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY)
return _get_etree_text(part_key)
else:
uri = url_unquote(request.path)
pos1 = uri.find('PartitionKey=\'')
pos2 = uri.find('\',', pos1)
if pos1 == -1 or pos2 == -1:
raise AzureBatchValidationError(_ERROR_CANNOT_FIND_PARTITION_KEY)
return uri[pos1 + len('PartitionKey=\''):pos2] | [
"def",
"get_request_partition_key",
"(",
"self",
",",
"request",
")",
":",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"doc",
"=",
"ETree",
".",
"fromstring",
"(",
"request",
".",
"body",
")",
"part_key",
"=",
"doc",
".",
"find",
"(",
"'./atom:c... | Extracts PartitionKey from request.body if it is a POST request or from
request.path if it is not a POST request. Only insert operation request
is a POST request and the PartitionKey is in the request body.
request:
the request to insert, update or delete entity | [
"Extracts",
"PartitionKey",
"from",
"request",
".",
"body",
"if",
"it",
"is",
"a",
"POST",
"request",
"or",
"from",
"request",
".",
"path",
"if",
"it",
"is",
"not",
"a",
"POST",
"request",
".",
"Only",
"insert",
"operation",
"request",
"is",
"a",
"POST",... | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/_http/batchclient.py#L87-L108 | train | 41,653 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/_http/batchclient.py | _BatchClient.get_request_row_key | def get_request_row_key(self, request):
'''
Extracts RowKey from request.body if it is a POST request or from
request.path if it is not a POST request. Only insert operation request
is a POST request and the Rowkey is in the request body.
request:
the request to insert, update or delete entity
'''
if request.method == 'POST':
doc = ETree.fromstring(request.body)
row_key = doc.find('./atom:content/m:properties/d:RowKey', _etree_entity_feed_namespaces)
if row_key is None:
raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY)
return _get_etree_text(row_key)
else:
uri = url_unquote(request.path)
pos1 = uri.find('RowKey=\'')
pos2 = uri.find('\')', pos1)
if pos1 == -1 or pos2 == -1:
raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY)
row_key = uri[pos1 + len('RowKey=\''):pos2]
return row_key | python | def get_request_row_key(self, request):
'''
Extracts RowKey from request.body if it is a POST request or from
request.path if it is not a POST request. Only insert operation request
is a POST request and the Rowkey is in the request body.
request:
the request to insert, update or delete entity
'''
if request.method == 'POST':
doc = ETree.fromstring(request.body)
row_key = doc.find('./atom:content/m:properties/d:RowKey', _etree_entity_feed_namespaces)
if row_key is None:
raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY)
return _get_etree_text(row_key)
else:
uri = url_unquote(request.path)
pos1 = uri.find('RowKey=\'')
pos2 = uri.find('\')', pos1)
if pos1 == -1 or pos2 == -1:
raise AzureBatchValidationError(_ERROR_CANNOT_FIND_ROW_KEY)
row_key = uri[pos1 + len('RowKey=\''):pos2]
return row_key | [
"def",
"get_request_row_key",
"(",
"self",
",",
"request",
")",
":",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"doc",
"=",
"ETree",
".",
"fromstring",
"(",
"request",
".",
"body",
")",
"row_key",
"=",
"doc",
".",
"find",
"(",
"'./atom:content/... | Extracts RowKey from request.body if it is a POST request or from
request.path if it is not a POST request. Only insert operation request
is a POST request and the Rowkey is in the request body.
request:
the request to insert, update or delete entity | [
"Extracts",
"RowKey",
"from",
"request",
".",
"body",
"if",
"it",
"is",
"a",
"POST",
"request",
"or",
"from",
"request",
".",
"path",
"if",
"it",
"is",
"not",
"a",
"POST",
"request",
".",
"Only",
"insert",
"operation",
"request",
"is",
"a",
"POST",
"re... | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/_http/batchclient.py#L110-L132 | train | 41,654 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/_http/batchclient.py | _BatchClient.validate_request_table | def validate_request_table(self, request):
'''
Validates that all requests have the same table name. Set the table
name if it is the first request for the batch operation.
request:
the request to insert, update or delete entity
'''
if self.batch_table:
if self.get_request_table(request) != self.batch_table:
raise AzureBatchValidationError(_ERROR_INCORRECT_TABLE_IN_BATCH)
else:
self.batch_table = self.get_request_table(request) | python | def validate_request_table(self, request):
'''
Validates that all requests have the same table name. Set the table
name if it is the first request for the batch operation.
request:
the request to insert, update or delete entity
'''
if self.batch_table:
if self.get_request_table(request) != self.batch_table:
raise AzureBatchValidationError(_ERROR_INCORRECT_TABLE_IN_BATCH)
else:
self.batch_table = self.get_request_table(request) | [
"def",
"validate_request_table",
"(",
"self",
",",
"request",
")",
":",
"if",
"self",
".",
"batch_table",
":",
"if",
"self",
".",
"get_request_table",
"(",
"request",
")",
"!=",
"self",
".",
"batch_table",
":",
"raise",
"AzureBatchValidationError",
"(",
"_ERRO... | Validates that all requests have the same table name. Set the table
name if it is the first request for the batch operation.
request:
the request to insert, update or delete entity | [
"Validates",
"that",
"all",
"requests",
"have",
"the",
"same",
"table",
"name",
".",
"Set",
"the",
"table",
"name",
"if",
"it",
"is",
"the",
"first",
"request",
"for",
"the",
"batch",
"operation",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/_http/batchclient.py#L134-L146 | train | 41,655 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/_http/batchclient.py | _BatchClient.validate_request_partition_key | def validate_request_partition_key(self, request):
'''
Validates that all requests have the same PartitiionKey. Set the
PartitionKey if it is the first request for the batch operation.
request:
the request to insert, update or delete entity
'''
if self.batch_partition_key:
if self.get_request_partition_key(request) != \
self.batch_partition_key:
raise AzureBatchValidationError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
else:
self.batch_partition_key = self.get_request_partition_key(request) | python | def validate_request_partition_key(self, request):
'''
Validates that all requests have the same PartitiionKey. Set the
PartitionKey if it is the first request for the batch operation.
request:
the request to insert, update or delete entity
'''
if self.batch_partition_key:
if self.get_request_partition_key(request) != \
self.batch_partition_key:
raise AzureBatchValidationError(_ERROR_INCORRECT_PARTITION_KEY_IN_BATCH)
else:
self.batch_partition_key = self.get_request_partition_key(request) | [
"def",
"validate_request_partition_key",
"(",
"self",
",",
"request",
")",
":",
"if",
"self",
".",
"batch_partition_key",
":",
"if",
"self",
".",
"get_request_partition_key",
"(",
"request",
")",
"!=",
"self",
".",
"batch_partition_key",
":",
"raise",
"AzureBatchV... | Validates that all requests have the same PartitiionKey. Set the
PartitionKey if it is the first request for the batch operation.
request:
the request to insert, update or delete entity | [
"Validates",
"that",
"all",
"requests",
"have",
"the",
"same",
"PartitiionKey",
".",
"Set",
"the",
"PartitionKey",
"if",
"it",
"is",
"the",
"first",
"request",
"for",
"the",
"batch",
"operation",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/_http/batchclient.py#L148-L161 | train | 41,656 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/_http/batchclient.py | _BatchClient.validate_request_row_key | def validate_request_row_key(self, request):
'''
Validates that all requests have the different RowKey and adds RowKey
to existing RowKey list.
request:
the request to insert, update or delete entity
'''
if self.batch_row_keys:
if self.get_request_row_key(request) in self.batch_row_keys:
raise AzureBatchValidationError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
else:
self.batch_row_keys.append(self.get_request_row_key(request)) | python | def validate_request_row_key(self, request):
'''
Validates that all requests have the different RowKey and adds RowKey
to existing RowKey list.
request:
the request to insert, update or delete entity
'''
if self.batch_row_keys:
if self.get_request_row_key(request) in self.batch_row_keys:
raise AzureBatchValidationError(_ERROR_DUPLICATE_ROW_KEY_IN_BATCH)
else:
self.batch_row_keys.append(self.get_request_row_key(request)) | [
"def",
"validate_request_row_key",
"(",
"self",
",",
"request",
")",
":",
"if",
"self",
".",
"batch_row_keys",
":",
"if",
"self",
".",
"get_request_row_key",
"(",
"request",
")",
"in",
"self",
".",
"batch_row_keys",
":",
"raise",
"AzureBatchValidationError",
"("... | Validates that all requests have the different RowKey and adds RowKey
to existing RowKey list.
request:
the request to insert, update or delete entity | [
"Validates",
"that",
"all",
"requests",
"have",
"the",
"different",
"RowKey",
"and",
"adds",
"RowKey",
"to",
"existing",
"RowKey",
"list",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/_http/batchclient.py#L163-L175 | train | 41,657 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/_http/batchclient.py | _BatchClient.begin_batch | def begin_batch(self):
'''
Starts the batch operation. Intializes the batch variables
is_batch:
batch operation flag.
batch_table:
the table name of the batch operation
batch_partition_key:
the PartitionKey of the batch requests.
batch_row_keys:
the RowKey list of adding requests.
batch_requests:
the list of the requests.
'''
self.is_batch = True
self.batch_table = ''
self.batch_partition_key = ''
self.batch_row_keys = []
self.batch_requests = [] | python | def begin_batch(self):
'''
Starts the batch operation. Intializes the batch variables
is_batch:
batch operation flag.
batch_table:
the table name of the batch operation
batch_partition_key:
the PartitionKey of the batch requests.
batch_row_keys:
the RowKey list of adding requests.
batch_requests:
the list of the requests.
'''
self.is_batch = True
self.batch_table = ''
self.batch_partition_key = ''
self.batch_row_keys = []
self.batch_requests = [] | [
"def",
"begin_batch",
"(",
"self",
")",
":",
"self",
".",
"is_batch",
"=",
"True",
"self",
".",
"batch_table",
"=",
"''",
"self",
".",
"batch_partition_key",
"=",
"''",
"self",
".",
"batch_row_keys",
"=",
"[",
"]",
"self",
".",
"batch_requests",
"=",
"["... | Starts the batch operation. Intializes the batch variables
is_batch:
batch operation flag.
batch_table:
the table name of the batch operation
batch_partition_key:
the PartitionKey of the batch requests.
batch_row_keys:
the RowKey list of adding requests.
batch_requests:
the list of the requests. | [
"Starts",
"the",
"batch",
"operation",
".",
"Intializes",
"the",
"batch",
"variables"
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/_http/batchclient.py#L177-L196 | train | 41,658 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/_http/batchclient.py | _BatchClient.insert_request_to_batch | def insert_request_to_batch(self, request):
'''
Adds request to batch operation.
request:
the request to insert, update or delete entity
'''
self.validate_request_table(request)
self.validate_request_partition_key(request)
self.validate_request_row_key(request)
self.batch_requests.append(request) | python | def insert_request_to_batch(self, request):
'''
Adds request to batch operation.
request:
the request to insert, update or delete entity
'''
self.validate_request_table(request)
self.validate_request_partition_key(request)
self.validate_request_row_key(request)
self.batch_requests.append(request) | [
"def",
"insert_request_to_batch",
"(",
"self",
",",
"request",
")",
":",
"self",
".",
"validate_request_table",
"(",
"request",
")",
"self",
".",
"validate_request_partition_key",
"(",
"request",
")",
"self",
".",
"validate_request_row_key",
"(",
"request",
")",
"... | Adds request to batch operation.
request:
the request to insert, update or delete entity | [
"Adds",
"request",
"to",
"batch",
"operation",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/_http/batchclient.py#L198-L208 | train | 41,659 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/_http/batchclient.py | _BatchClient.commit_batch_requests | def commit_batch_requests(self):
''' Commits the batch requests. '''
batch_boundary = b'batch_' + _new_boundary()
changeset_boundary = b'changeset_' + _new_boundary()
# Commits batch only the requests list is not empty.
if self.batch_requests:
request = HTTPRequest()
request.method = 'POST'
request.host = self.batch_requests[0].host
request.path = '/$batch'
request.headers = [
('Content-Type', 'multipart/mixed; boundary=' + \
batch_boundary.decode('utf-8')),
('Accept', 'application/atom+xml,application/xml'),
('Accept-Charset', 'UTF-8')]
request.body = b'--' + batch_boundary + b'\n'
request.body += b'Content-Type: multipart/mixed; boundary='
request.body += changeset_boundary + b'\n\n'
content_id = 1
# Adds each request body to the POST data.
for batch_request in self.batch_requests:
request.body += b'--' + changeset_boundary + b'\n'
request.body += b'Content-Type: application/http\n'
request.body += b'Content-Transfer-Encoding: binary\n\n'
request.body += batch_request.method.encode('utf-8')
request.body += b' http://'
request.body += batch_request.host.encode('utf-8')
request.body += batch_request.path.encode('utf-8')
request.body += b' HTTP/1.1\n'
request.body += b'Content-ID: '
request.body += str(content_id).encode('utf-8') + b'\n'
content_id += 1
# Add different headers for different type requests.
if not batch_request.method == 'DELETE':
request.body += \
b'Content-Type: application/atom+xml;type=entry\n'
for name, value in batch_request.headers:
if name == 'If-Match':
request.body += name.encode('utf-8') + b': '
request.body += value.encode('utf-8') + b'\n'
break
request.body += b'Content-Length: '
request.body += str(len(batch_request.body)).encode('utf-8')
request.body += b'\n\n'
request.body += batch_request.body + b'\n'
else:
for name, value in batch_request.headers:
# If-Match should be already included in
# batch_request.headers, but in case it is missing,
# just add it.
if name == 'If-Match':
request.body += name.encode('utf-8') + b': '
request.body += value.encode('utf-8') + b'\n\n'
break
else:
request.body += b'If-Match: *\n\n'
request.body += b'--' + changeset_boundary + b'--' + b'\n'
request.body += b'--' + batch_boundary + b'--'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_storage_table_header(request)
self.authentication.sign_request(request)
# Submit the whole request as batch request.
response = self.perform_request(request)
if response.status >= 300:
# This exception will be caught by the general error handler
# and raised as an azure http exception
raise HTTPError(response.status,
_ERROR_BATCH_COMMIT_FAIL,
self.respheader,
response.body)
# http://www.odata.org/documentation/odata-version-2-0/batch-processing/
# The body of a ChangeSet response is either a response for all the
# successfully processed change request within the ChangeSet,
# formatted exactly as it would have appeared outside of a batch,
# or a single response indicating a failure of the entire ChangeSet.
responses = self._parse_batch_response(response.body)
if responses and responses[0].status >= 300:
self._report_batch_error(responses[0]) | python | def commit_batch_requests(self):
''' Commits the batch requests. '''
batch_boundary = b'batch_' + _new_boundary()
changeset_boundary = b'changeset_' + _new_boundary()
# Commits batch only the requests list is not empty.
if self.batch_requests:
request = HTTPRequest()
request.method = 'POST'
request.host = self.batch_requests[0].host
request.path = '/$batch'
request.headers = [
('Content-Type', 'multipart/mixed; boundary=' + \
batch_boundary.decode('utf-8')),
('Accept', 'application/atom+xml,application/xml'),
('Accept-Charset', 'UTF-8')]
request.body = b'--' + batch_boundary + b'\n'
request.body += b'Content-Type: multipart/mixed; boundary='
request.body += changeset_boundary + b'\n\n'
content_id = 1
# Adds each request body to the POST data.
for batch_request in self.batch_requests:
request.body += b'--' + changeset_boundary + b'\n'
request.body += b'Content-Type: application/http\n'
request.body += b'Content-Transfer-Encoding: binary\n\n'
request.body += batch_request.method.encode('utf-8')
request.body += b' http://'
request.body += batch_request.host.encode('utf-8')
request.body += batch_request.path.encode('utf-8')
request.body += b' HTTP/1.1\n'
request.body += b'Content-ID: '
request.body += str(content_id).encode('utf-8') + b'\n'
content_id += 1
# Add different headers for different type requests.
if not batch_request.method == 'DELETE':
request.body += \
b'Content-Type: application/atom+xml;type=entry\n'
for name, value in batch_request.headers:
if name == 'If-Match':
request.body += name.encode('utf-8') + b': '
request.body += value.encode('utf-8') + b'\n'
break
request.body += b'Content-Length: '
request.body += str(len(batch_request.body)).encode('utf-8')
request.body += b'\n\n'
request.body += batch_request.body + b'\n'
else:
for name, value in batch_request.headers:
# If-Match should be already included in
# batch_request.headers, but in case it is missing,
# just add it.
if name == 'If-Match':
request.body += name.encode('utf-8') + b': '
request.body += value.encode('utf-8') + b'\n\n'
break
else:
request.body += b'If-Match: *\n\n'
request.body += b'--' + changeset_boundary + b'--' + b'\n'
request.body += b'--' + batch_boundary + b'--'
request.path, request.query = _update_request_uri_query(request)
request.headers = _update_storage_table_header(request)
self.authentication.sign_request(request)
# Submit the whole request as batch request.
response = self.perform_request(request)
if response.status >= 300:
# This exception will be caught by the general error handler
# and raised as an azure http exception
raise HTTPError(response.status,
_ERROR_BATCH_COMMIT_FAIL,
self.respheader,
response.body)
# http://www.odata.org/documentation/odata-version-2-0/batch-processing/
# The body of a ChangeSet response is either a response for all the
# successfully processed change request within the ChangeSet,
# formatted exactly as it would have appeared outside of a batch,
# or a single response indicating a failure of the entire ChangeSet.
responses = self._parse_batch_response(response.body)
if responses and responses[0].status >= 300:
self._report_batch_error(responses[0]) | [
"def",
"commit_batch_requests",
"(",
"self",
")",
":",
"batch_boundary",
"=",
"b'batch_'",
"+",
"_new_boundary",
"(",
")",
"changeset_boundary",
"=",
"b'changeset_'",
"+",
"_new_boundary",
"(",
")",
"# Commits batch only the requests list is not empty.",
"if",
"self",
"... | Commits the batch requests. | [
"Commits",
"the",
"batch",
"requests",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/_http/batchclient.py#L216-L303 | train | 41,660 |
openstack/proliantutils | proliantutils/redfish/resources/manager/manager.py | HPEManager.set_license | def set_license(self, key):
"""Set the license on a redfish system
:param key: license key
"""
data = {'LicenseKey': key}
license_service_uri = (utils.get_subresource_path_by(self,
['Oem', 'Hpe', 'Links', 'LicenseService']))
self._conn.post(license_service_uri, data=data) | python | def set_license(self, key):
"""Set the license on a redfish system
:param key: license key
"""
data = {'LicenseKey': key}
license_service_uri = (utils.get_subresource_path_by(self,
['Oem', 'Hpe', 'Links', 'LicenseService']))
self._conn.post(license_service_uri, data=data) | [
"def",
"set_license",
"(",
"self",
",",
"key",
")",
":",
"data",
"=",
"{",
"'LicenseKey'",
":",
"key",
"}",
"license_service_uri",
"=",
"(",
"utils",
".",
"get_subresource_path_by",
"(",
"self",
",",
"[",
"'Oem'",
",",
"'Hpe'",
",",
"'Links'",
",",
"'Lic... | Set the license on a redfish system
:param key: license key | [
"Set",
"the",
"license",
"on",
"a",
"redfish",
"system"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/manager/manager.py#L31-L39 | train | 41,661 |
openstack/proliantutils | proliantutils/redfish/resources/manager/manager.py | HPEManager.virtual_media | def virtual_media(self):
"""Property to provide reference to `VirtualMediaCollection` instance.
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return virtual_media.VirtualMediaCollection(
self._conn, utils.get_subresource_path_by(self, 'VirtualMedia'),
redfish_version=self.redfish_version) | python | def virtual_media(self):
"""Property to provide reference to `VirtualMediaCollection` instance.
It is calculated once when the first time it is queried. On refresh,
this property gets reset.
"""
return virtual_media.VirtualMediaCollection(
self._conn, utils.get_subresource_path_by(self, 'VirtualMedia'),
redfish_version=self.redfish_version) | [
"def",
"virtual_media",
"(",
"self",
")",
":",
"return",
"virtual_media",
".",
"VirtualMediaCollection",
"(",
"self",
".",
"_conn",
",",
"utils",
".",
"get_subresource_path_by",
"(",
"self",
",",
"'VirtualMedia'",
")",
",",
"redfish_version",
"=",
"self",
".",
... | Property to provide reference to `VirtualMediaCollection` instance.
It is calculated once when the first time it is queried. On refresh,
this property gets reset. | [
"Property",
"to",
"provide",
"reference",
"to",
"VirtualMediaCollection",
"instance",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/manager/manager.py#L43-L51 | train | 41,662 |
NuGrid/NuGridPy | nugridpy/ascii_table.py | writeTraj | def writeTraj(filename='trajectory.input', data=[], ageunit=0, tunit=0,
rhounit=0, idNum=0):
'''
Method for writeing Trajectory type ascii files files.
Parameters
----------
filename : string
The file where this data will be written.
data : list
A list of 1D data vectors with time, T and rho.
ageunit : integer, optional
If 1 ageunit = SEC, If 0 ageunit = YRS. If 2 agunit =
logtimerev in yrs. The default is 0. logtimerev is log of
time until end
tunit : integer, optional
If 1 TUNIT = T9K, if 0 TUNIT = T8K. The default is 0.
rhounit : integer, optional
If 1 RHOUNIT = LOG, if 0 RHOUNIT = CGS. The default is 0.
idNum : optional
An optional id argument
'''
if data==[]:
print('Please input correct data')
print('returning None')
return None
headers=[]
if ageunit ==1:
headers.append('AGEUNIT = SEC')
elif ageunit==0:
headers.append('AGEUNIT = YRS')
elif ageunit==2:
headers.append('AGEUNIT = logtimerev/yrs')
if tunit ==1:
headers.append('TUNIT = T9K')
elif tunit==0:
headers.append('TUNIT = T8K')
if rhounit ==1:
headers.append('RHOUNIT = LOG')
elif rhounit==0:
headers.append('RHOUNIT = CGS')
headers.append('ID = '+str(idNum))
write(filename,headers,['time','T','rho'],data,['YRS/SEC; T8K/T9K; CGS/LOG',"FORMAT: '(10x,A3)'"],trajectory=True) | python | def writeTraj(filename='trajectory.input', data=[], ageunit=0, tunit=0,
rhounit=0, idNum=0):
'''
Method for writeing Trajectory type ascii files files.
Parameters
----------
filename : string
The file where this data will be written.
data : list
A list of 1D data vectors with time, T and rho.
ageunit : integer, optional
If 1 ageunit = SEC, If 0 ageunit = YRS. If 2 agunit =
logtimerev in yrs. The default is 0. logtimerev is log of
time until end
tunit : integer, optional
If 1 TUNIT = T9K, if 0 TUNIT = T8K. The default is 0.
rhounit : integer, optional
If 1 RHOUNIT = LOG, if 0 RHOUNIT = CGS. The default is 0.
idNum : optional
An optional id argument
'''
if data==[]:
print('Please input correct data')
print('returning None')
return None
headers=[]
if ageunit ==1:
headers.append('AGEUNIT = SEC')
elif ageunit==0:
headers.append('AGEUNIT = YRS')
elif ageunit==2:
headers.append('AGEUNIT = logtimerev/yrs')
if tunit ==1:
headers.append('TUNIT = T9K')
elif tunit==0:
headers.append('TUNIT = T8K')
if rhounit ==1:
headers.append('RHOUNIT = LOG')
elif rhounit==0:
headers.append('RHOUNIT = CGS')
headers.append('ID = '+str(idNum))
write(filename,headers,['time','T','rho'],data,['YRS/SEC; T8K/T9K; CGS/LOG',"FORMAT: '(10x,A3)'"],trajectory=True) | [
"def",
"writeTraj",
"(",
"filename",
"=",
"'trajectory.input'",
",",
"data",
"=",
"[",
"]",
",",
"ageunit",
"=",
"0",
",",
"tunit",
"=",
"0",
",",
"rhounit",
"=",
"0",
",",
"idNum",
"=",
"0",
")",
":",
"if",
"data",
"==",
"[",
"]",
":",
"print",
... | Method for writeing Trajectory type ascii files files.
Parameters
----------
filename : string
The file where this data will be written.
data : list
A list of 1D data vectors with time, T and rho.
ageunit : integer, optional
If 1 ageunit = SEC, If 0 ageunit = YRS. If 2 agunit =
logtimerev in yrs. The default is 0. logtimerev is log of
time until end
tunit : integer, optional
If 1 TUNIT = T9K, if 0 TUNIT = T8K. The default is 0.
rhounit : integer, optional
If 1 RHOUNIT = LOG, if 0 RHOUNIT = CGS. The default is 0.
idNum : optional
An optional id argument | [
"Method",
"for",
"writeing",
"Trajectory",
"type",
"ascii",
"files",
"files",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/ascii_table.py#L349-L396 | train | 41,663 |
NuGrid/NuGridPy | nugridpy/ascii_table.py | readTable.get | def get(self, attri):
'''
Method that dynamically determines the type of attribute that is
passed into this method. Also it then returns that attribute's
associated data.
Parameters
----------
attri : string
The attribute we are looking for.
'''
isCol=False
isHead=False
if attri in self.dcols:
isCol=True
elif attri in self.hattrs:
isHead=True
else:
print("That attribute does not exist in this File")
print('Returning None')
if isCol:
return self.getColData(attri)
elif isHead:
return hattrs | python | def get(self, attri):
'''
Method that dynamically determines the type of attribute that is
passed into this method. Also it then returns that attribute's
associated data.
Parameters
----------
attri : string
The attribute we are looking for.
'''
isCol=False
isHead=False
if attri in self.dcols:
isCol=True
elif attri in self.hattrs:
isHead=True
else:
print("That attribute does not exist in this File")
print('Returning None')
if isCol:
return self.getColData(attri)
elif isHead:
return hattrs | [
"def",
"get",
"(",
"self",
",",
"attri",
")",
":",
"isCol",
"=",
"False",
"isHead",
"=",
"False",
"if",
"attri",
"in",
"self",
".",
"dcols",
":",
"isCol",
"=",
"True",
"elif",
"attri",
"in",
"self",
".",
"hattrs",
":",
"isHead",
"=",
"True",
"else"... | Method that dynamically determines the type of attribute that is
passed into this method. Also it then returns that attribute's
associated data.
Parameters
----------
attri : string
The attribute we are looking for. | [
"Method",
"that",
"dynamically",
"determines",
"the",
"type",
"of",
"attribute",
"that",
"is",
"passed",
"into",
"this",
"method",
".",
"Also",
"it",
"then",
"returns",
"that",
"attribute",
"s",
"associated",
"data",
"."
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/nugridpy/ascii_table.py#L208-L234 | train | 41,664 |
NuGrid/NuGridPy | scripts/nugrid_set/nugrid_set.py | initial_finall_mass_relation | def initial_finall_mass_relation(self,marker='o',linestyle='--'):
'''
INtiial to final mass relation
'''
final_m=[]
ini_m=[]
for i in range(len(self.runs_H5_surf)):
sefiles=se(self.runs_H5_out[i])
ini_m.append(sefiles.get("mini"))
h1=sefiles.get(int(sefiles.se.cycles[-2]),'H-1')
mass=sefiles.get(int(sefiles.se.cycles[-2]),'mass')
idx=-1
for k in range(len(h1)):
if h1[k]>0.1:
idx=k
break
final_m.append(mass[idx])
label='Z='+str(sefiles.get('zini'))
plt.plot(ini_m,final_m,label=label,marker=marker,linestyle=linestyle)
plt.xlabel('$M_{Initial} [M_{\odot}]$',size=23)
plt.ylabel('$M_{Final} [M_{\odot}]$',size=23) | python | def initial_finall_mass_relation(self,marker='o',linestyle='--'):
'''
INtiial to final mass relation
'''
final_m=[]
ini_m=[]
for i in range(len(self.runs_H5_surf)):
sefiles=se(self.runs_H5_out[i])
ini_m.append(sefiles.get("mini"))
h1=sefiles.get(int(sefiles.se.cycles[-2]),'H-1')
mass=sefiles.get(int(sefiles.se.cycles[-2]),'mass')
idx=-1
for k in range(len(h1)):
if h1[k]>0.1:
idx=k
break
final_m.append(mass[idx])
label='Z='+str(sefiles.get('zini'))
plt.plot(ini_m,final_m,label=label,marker=marker,linestyle=linestyle)
plt.xlabel('$M_{Initial} [M_{\odot}]$',size=23)
plt.ylabel('$M_{Final} [M_{\odot}]$',size=23) | [
"def",
"initial_finall_mass_relation",
"(",
"self",
",",
"marker",
"=",
"'o'",
",",
"linestyle",
"=",
"'--'",
")",
":",
"final_m",
"=",
"[",
"]",
"ini_m",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"runs_H5_surf",
")",
")... | INtiial to final mass relation | [
"INtiial",
"to",
"final",
"mass",
"relation"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/scripts/nugrid_set/nugrid_set.py#L304-L325 | train | 41,665 |
NuGrid/NuGridPy | scripts/nugrid_set/nugrid_set.py | set_cores_massive | def set_cores_massive(self,filename='core_masses_massive.txt'):
'''
Uesse function cores in nugridse.py
'''
core_info=[]
minis=[]
for i in range(len(self.runs_H5_surf)):
sefiles=se(self.runs_H5_out[i])
mini=sefiles.get('mini')
minis.append(mini)
incycle=int(sefiles.se.cycles[-1])
core_info.append(sefiles.cores(incycle=incycle))
print_info=''
for i in range(len(self.runs_H5_surf)):
if i ==0:
print 'Following returned for each initial mass'
print core_info[i][1]
#print '----Mini: ',minis[i],'------'
print_info+=(str(minis[i])+' & ')
info=core_info[i][0]
for k in range(len(info)):
print_info+=('{:.3E}'.format(float(core_info[i][0][k]))+' & ')
print_info=(print_info+'\n')
#print core_info[i][2]
f1=open(filename,'a')
f1.write(print_info)
f1.close() | python | def set_cores_massive(self,filename='core_masses_massive.txt'):
'''
Uesse function cores in nugridse.py
'''
core_info=[]
minis=[]
for i in range(len(self.runs_H5_surf)):
sefiles=se(self.runs_H5_out[i])
mini=sefiles.get('mini')
minis.append(mini)
incycle=int(sefiles.se.cycles[-1])
core_info.append(sefiles.cores(incycle=incycle))
print_info=''
for i in range(len(self.runs_H5_surf)):
if i ==0:
print 'Following returned for each initial mass'
print core_info[i][1]
#print '----Mini: ',minis[i],'------'
print_info+=(str(minis[i])+' & ')
info=core_info[i][0]
for k in range(len(info)):
print_info+=('{:.3E}'.format(float(core_info[i][0][k]))+' & ')
print_info=(print_info+'\n')
#print core_info[i][2]
f1=open(filename,'a')
f1.write(print_info)
f1.close() | [
"def",
"set_cores_massive",
"(",
"self",
",",
"filename",
"=",
"'core_masses_massive.txt'",
")",
":",
"core_info",
"=",
"[",
"]",
"minis",
"=",
"[",
"]",
"for",
"i",
"in",
"range",
"(",
"len",
"(",
"self",
".",
"runs_H5_surf",
")",
")",
":",
"sefiles",
... | Uesse function cores in nugridse.py | [
"Uesse",
"function",
"cores",
"in",
"nugridse",
".",
"py"
] | eee8047446e398be77362d82c1d8b3310054fab0 | https://github.com/NuGrid/NuGridPy/blob/eee8047446e398be77362d82c1d8b3310054fab0/scripts/nugrid_set/nugrid_set.py#L1550-L1578 | train | 41,666 |
Azure/azure-multiapi-storage-python | azure/multiapi/storage/v2015_04_05/_serialization.py | _get_request_body_bytes_only | def _get_request_body_bytes_only(param_name, param_value):
'''Validates the request body passed in and converts it to bytes
if our policy allows it.'''
if param_value is None:
return b''
if isinstance(param_value, bytes):
return param_value
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name)) | python | def _get_request_body_bytes_only(param_name, param_value):
'''Validates the request body passed in and converts it to bytes
if our policy allows it.'''
if param_value is None:
return b''
if isinstance(param_value, bytes):
return param_value
raise TypeError(_ERROR_VALUE_SHOULD_BE_BYTES.format(param_name)) | [
"def",
"_get_request_body_bytes_only",
"(",
"param_name",
",",
"param_value",
")",
":",
"if",
"param_value",
"is",
"None",
":",
"return",
"b''",
"if",
"isinstance",
"(",
"param_value",
",",
"bytes",
")",
":",
"return",
"param_value",
"raise",
"TypeError",
"(",
... | Validates the request body passed in and converts it to bytes
if our policy allows it. | [
"Validates",
"the",
"request",
"body",
"passed",
"in",
"and",
"converts",
"it",
"to",
"bytes",
"if",
"our",
"policy",
"allows",
"it",
"."
] | bd5482547f993c6eb56fd09070e15c2e9616e440 | https://github.com/Azure/azure-multiapi-storage-python/blob/bd5482547f993c6eb56fd09070e15c2e9616e440/azure/multiapi/storage/v2015_04_05/_serialization.py#L96-L105 | train | 41,667 |
openstack/proliantutils | proliantutils/redfish/resources/system/storage/common.py | _get_attribute_value_of | def _get_attribute_value_of(resource, attribute_name, default=None):
"""Gets the value of attribute_name from the resource
It catches the exception, if any, while retrieving the
value of attribute_name from resource and returns default.
:param resource: The resource object
:attribute_name: Property of the resource
:returns the property value if no error encountered
else return 0.
"""
try:
return getattr(resource, attribute_name)
except (sushy.exceptions.SushyError,
exception.MissingAttributeError) as e:
msg = (('The Redfish controller failed to get the '
'attribute %(attribute)s from resource %(resource)s. '
'Error %(error)s') % {'error': str(e),
'attribute': attribute_name,
'resource':
resource.__class__.__name__})
LOG.debug(msg)
return default | python | def _get_attribute_value_of(resource, attribute_name, default=None):
"""Gets the value of attribute_name from the resource
It catches the exception, if any, while retrieving the
value of attribute_name from resource and returns default.
:param resource: The resource object
:attribute_name: Property of the resource
:returns the property value if no error encountered
else return 0.
"""
try:
return getattr(resource, attribute_name)
except (sushy.exceptions.SushyError,
exception.MissingAttributeError) as e:
msg = (('The Redfish controller failed to get the '
'attribute %(attribute)s from resource %(resource)s. '
'Error %(error)s') % {'error': str(e),
'attribute': attribute_name,
'resource':
resource.__class__.__name__})
LOG.debug(msg)
return default | [
"def",
"_get_attribute_value_of",
"(",
"resource",
",",
"attribute_name",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"return",
"getattr",
"(",
"resource",
",",
"attribute_name",
")",
"except",
"(",
"sushy",
".",
"exceptions",
".",
"SushyError",
",",
... | Gets the value of attribute_name from the resource
It catches the exception, if any, while retrieving the
value of attribute_name from resource and returns default.
:param resource: The resource object
:attribute_name: Property of the resource
:returns the property value if no error encountered
else return 0. | [
"Gets",
"the",
"value",
"of",
"attribute_name",
"from",
"the",
"resource"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/common.py#L26-L48 | train | 41,668 |
openstack/proliantutils | proliantutils/redfish/resources/system/storage/common.py | get_local_gb | def get_local_gb(system_obj):
"""Gets the largest volume or the largest disk
:param system_obj: The HPESystem object.
:returns the size in GB
"""
local_max_bytes = 0
logical_max_mib = 0
volume_max_bytes = 0
physical_max_mib = 0
drives_max_bytes = 0
simple_max_bytes = 0
# Gets the resources and properties
# its quite possible for a system to lack the resource, hence its
# URI may also be lacking.
# Check if smart_storage resource exist at the system
smart_resource = _get_attribute_value_of(system_obj, 'smart_storage')
# Check if storage resource exist at the system
storage_resource = _get_attribute_value_of(system_obj, 'storages')
if smart_resource is not None:
logical_max_mib = _get_attribute_value_of(
smart_resource, 'logical_drives_maximum_size_mib', default=0)
if storage_resource is not None:
volume_max_bytes = _get_attribute_value_of(
storage_resource, 'volumes_maximum_size_bytes', default=0)
# Get the largest volume from the system.
local_max_bytes = utils.max_safe([(logical_max_mib * 1024 * 1024),
volume_max_bytes])
# if volume is not found, then traverse through the possible disk drives
# and get the biggest disk.
if local_max_bytes == 0:
if smart_resource is not None:
physical_max_mib = _get_attribute_value_of(
smart_resource, 'physical_drives_maximum_size_mib', default=0)
if storage_resource is not None:
drives_max_bytes = _get_attribute_value_of(
storage_resource, 'drives_maximum_size_bytes', default=0)
# Check if the SimpleStorage resource exist at the system.
simple_resource = _get_attribute_value_of(system_obj,
'simple_storages')
if simple_resource is not None:
simple_max_bytes = _get_attribute_value_of(
simple_resource, 'maximum_size_bytes', default=0)
local_max_bytes = utils.max_safe([(physical_max_mib * 1024 * 1024),
drives_max_bytes, simple_max_bytes])
# Convert the received size to GB and reduce the value by 1 Gb as
# ironic requires the local_gb to be returned 1 less than actual size.
local_gb = 0
if local_max_bytes > 0:
local_gb = int(local_max_bytes / (1024 * 1024 * 1024)) - 1
else:
msg = ('The maximum size for the hard disk or logical '
'volume could not be determined.')
LOG.debug(msg)
def get_local_gb(system_obj):
    """Return the size, in GB, of the biggest volume or disk on the system.

    Looks first for the largest logical volume; when none exists, falls
    back to the largest physical/simple disk. The reported value is one
    GB smaller than the real capacity, as ironic expects for local_gb.

    :param system_obj: The HPESystem object.
    :returns: the size in GB
    """
    mib = 1024 * 1024

    # Either resource may be missing on a given system, so fetch both
    # defensively before reading any size attribute from them.
    smart = _get_attribute_value_of(system_obj, 'smart_storage')
    storage = _get_attribute_value_of(system_obj, 'storages')

    logical_mib = 0
    volume_bytes = 0
    if smart is not None:
        logical_mib = _get_attribute_value_of(
            smart, 'logical_drives_maximum_size_mib', default=0)
    if storage is not None:
        volume_bytes = _get_attribute_value_of(
            storage, 'volumes_maximum_size_bytes', default=0)

    # Largest volume found on the system, in bytes.
    biggest_bytes = utils.max_safe([logical_mib * mib, volume_bytes])

    if biggest_bytes == 0:
        # No volume at all: fall back to the biggest individual disk.
        physical_mib = 0
        drive_bytes = 0
        simple_bytes = 0
        if smart is not None:
            physical_mib = _get_attribute_value_of(
                smart, 'physical_drives_maximum_size_mib', default=0)
        if storage is not None:
            drive_bytes = _get_attribute_value_of(
                storage, 'drives_maximum_size_bytes', default=0)
        simple = _get_attribute_value_of(system_obj, 'simple_storages')
        if simple is not None:
            simple_bytes = _get_attribute_value_of(
                simple, 'maximum_size_bytes', default=0)
        biggest_bytes = utils.max_safe(
            [physical_mib * mib, drive_bytes, simple_bytes])

    if biggest_bytes > 0:
        # ironic requires local_gb to be one less than the actual size.
        return int(biggest_bytes / (1024 * 1024 * 1024)) - 1

    LOG.debug('The maximum size for the hard disk or logical '
              'volume could not be determined.')
    return 0
"def",
"get_local_gb",
"(",
"system_obj",
")",
":",
"local_max_bytes",
"=",
"0",
"logical_max_mib",
"=",
"0",
"volume_max_bytes",
"=",
"0",
"physical_max_mib",
"=",
"0",
"drives_max_bytes",
"=",
"0",
"simple_max_bytes",
"=",
"0",
"# Gets the resources and properties",... | Gets the largest volume or the largest disk
:param system_obj: The HPESystem object.
:returns the size in GB | [
"Gets",
"the",
"largest",
"volume",
"or",
"the",
"largest",
"disk"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/common.py#L51-L112 | train | 41,669 |
openstack/proliantutils | proliantutils/redfish/resources/system/storage/common.py | has_ssd | def has_ssd(system_obj):
"""Gets if the system has any drive as SSD drive
:param system_obj: The HPESystem object.
:returns True if system has SSD drives.
"""
smart_value = False
storage_value = False
smart_resource = _get_attribute_value_of(system_obj, 'smart_storage')
if smart_resource is not None:
smart_value = _get_attribute_value_of(
smart_resource, 'has_ssd', default=False)
if smart_value:
return smart_value
# Its returned before just to avoid hitting BMC if we have
# already got the SSD device above.
storage_resource = _get_attribute_value_of(system_obj, 'storages')
if storage_resource is not None:
storage_value = _get_attribute_value_of(
storage_resource, 'has_ssd', default=False)
def has_ssd(system_obj):
    """Tell whether the system contains any SSD drive.

    :param system_obj: The HPESystem object.
    :returns: True if system has SSD drives.
    """
    smart = _get_attribute_value_of(system_obj, 'smart_storage')
    if smart is not None:
        found = _get_attribute_value_of(smart, 'has_ssd', default=False)
        if found:
            # Short-circuit: once an SSD has been located via the smart
            # storage resource, avoid a second BMC round-trip.
            return found
    storage = _get_attribute_value_of(system_obj, 'storages')
    if storage is None:
        return False
    return _get_attribute_value_of(storage, 'has_ssd', default=False)
"def",
"has_ssd",
"(",
"system_obj",
")",
":",
"smart_value",
"=",
"False",
"storage_value",
"=",
"False",
"smart_resource",
"=",
"_get_attribute_value_of",
"(",
"system_obj",
",",
"'smart_storage'",
")",
"if",
"smart_resource",
"is",
"not",
"None",
":",
"smart_va... | Gets if the system has any drive as SSD drive
:param system_obj: The HPESystem object.
:returns True if system has SSD drives. | [
"Gets",
"if",
"the",
"system",
"has",
"any",
"drive",
"as",
"SSD",
"drive"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/common.py#L115-L137 | train | 41,670 |
openstack/proliantutils | proliantutils/redfish/resources/system/storage/common.py | has_nvme_ssd | def has_nvme_ssd(system_obj):
"""Gets if the system has any drive as NVMe SSD drive
:param system_obj: The HPESystem object.
:returns True if system has SSD drives and protocol is NVMe.
"""
storage_value = False
storage_resource = _get_attribute_value_of(system_obj, 'storages')
if storage_resource is not None:
storage_value = _get_attribute_value_of(
storage_resource, 'has_nvme_ssd', default=False)
def has_nvme_ssd(system_obj):
    """Tell whether the system has any NVMe SSD drive.

    :param system_obj: The HPESystem object.
    :returns: True if system has SSD drives and protocol is NVMe.
    """
    storage = _get_attribute_value_of(system_obj, 'storages')
    if storage is None:
        # No storage resource on this system: nothing to inspect.
        return False
    return _get_attribute_value_of(storage, 'has_nvme_ssd', default=False)
"def",
"has_nvme_ssd",
"(",
"system_obj",
")",
":",
"storage_value",
"=",
"False",
"storage_resource",
"=",
"_get_attribute_value_of",
"(",
"system_obj",
",",
"'storages'",
")",
"if",
"storage_resource",
"is",
"not",
"None",
":",
"storage_value",
"=",
"_get_attribut... | Gets if the system has any drive as NVMe SSD drive
:param system_obj: The HPESystem object.
:returns True if system has SSD drives and protocol is NVMe. | [
"Gets",
"if",
"the",
"system",
"has",
"any",
"drive",
"as",
"NVMe",
"SSD",
"drive"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/common.py#L165-L177 | train | 41,671 |
openstack/proliantutils | proliantutils/redfish/resources/system/storage/common.py | get_drive_rotational_speed_rpm | def get_drive_rotational_speed_rpm(system_obj):
"""Gets the set of rotational speed rpms of the disks.
:param system_obj: The HPESystem object.
:returns the set of rotational speed rpms of the HDD devices.
"""
speed = set()
smart_resource = _get_attribute_value_of(system_obj, 'smart_storage')
if smart_resource is not None:
speed.update(_get_attribute_value_of(
smart_resource, 'drive_rotational_speed_rpm', default=set()))
storage_resource = _get_attribute_value_of(system_obj, 'storages')
if storage_resource is not None:
speed.update(_get_attribute_value_of(
storage_resource, 'drive_rotational_speed_rpm', default=set()))
def get_drive_rotational_speed_rpm(system_obj):
    """Collect the rotational speeds (rpm) of the disks on the system.

    :param system_obj: The HPESystem object.
    :returns: the set of rotational speed rpms of the HDD devices.
    """
    rpms = set()
    # Both the smart storage and the generic storage resources can
    # report drive speeds; merge whatever each one provides.
    for attribute in ('smart_storage', 'storages'):
        resource = _get_attribute_value_of(system_obj, attribute)
        if resource is not None:
            rpms |= set(_get_attribute_value_of(
                resource, 'drive_rotational_speed_rpm', default=set()))
    return rpms
"def",
"get_drive_rotational_speed_rpm",
"(",
"system_obj",
")",
":",
"speed",
"=",
"set",
"(",
")",
"smart_resource",
"=",
"_get_attribute_value_of",
"(",
"system_obj",
",",
"'smart_storage'",
")",
"if",
"smart_resource",
"is",
"not",
"None",
":",
"speed",
".",
... | Gets the set of rotational speed rpms of the disks.
:param system_obj: The HPESystem object.
:returns the set of rotational speed rpms of the HDD devices. | [
"Gets",
"the",
"set",
"of",
"rotational",
"speed",
"rpms",
"of",
"the",
"disks",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/redfish/resources/system/storage/common.py#L180-L195 | train | 41,672 |
openstack/proliantutils | proliantutils/ipa_hw_manager/hardware_manager.py | ProliantHardwareManager.create_configuration | def create_configuration(self, node, ports):
"""Create RAID configuration on the bare metal.
This method creates the desired RAID configuration as read from
node['target_raid_config'].
:param node: A dictionary of the node object
:param ports: A list of dictionaries containing information of ports
for the node
:returns: The current RAID configuration of the below format.
raid_config = {
'logical_disks': [{
'size_gb': 100,
'raid_level': 1,
'physical_disks': [
'5I:0:1',
'5I:0:2'],
'controller': 'Smart array controller'
},
]
}
"""
target_raid_config = node.get('target_raid_config', {}).copy()
return hpssa_manager.create_configuration(
def create_configuration(self, node, ports):
    """Create RAID configuration on the bare metal.

    Builds the RAID setup described by node['target_raid_config'] on
    the server's smart array controllers.

    :param node: A dictionary of the node object
    :param ports: A list of dictionaries containing information of ports
        for the node
    :returns: The current RAID configuration of the below format.
        raid_config = {
            'logical_disks': [{
                'size_gb': 100,
                'raid_level': 1,
                'physical_disks': [
                    '5I:0:1',
                    '5I:0:2'],
                'controller': 'Smart array controller'
                },
            ]
        }
    """
    # Work on a copy so the node dictionary is never mutated downstream.
    desired_config = dict(node.get('target_raid_config', {}))
    return hpssa_manager.create_configuration(raid_config=desired_config)
"def",
"create_configuration",
"(",
"self",
",",
"node",
",",
"ports",
")",
":",
"target_raid_config",
"=",
"node",
".",
"get",
"(",
"'target_raid_config'",
",",
"{",
"}",
")",
".",
"copy",
"(",
")",
"return",
"hpssa_manager",
".",
"create_configuration",
"(... | Create RAID configuration on the bare metal.
This method creates the desired RAID configuration as read from
node['target_raid_config'].
:param node: A dictionary of the node object
:param ports: A list of dictionaries containing information of ports
for the node
:returns: The current RAID configuration of the below format.
raid_config = {
'logical_disks': [{
'size_gb': 100,
'raid_level': 1,
'physical_disks': [
'5I:0:1',
'5I:0:2'],
'controller': 'Smart array controller'
},
]
} | [
"Create",
"RAID",
"configuration",
"on",
"the",
"bare",
"metal",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ipa_hw_manager/hardware_manager.py#L55-L79 | train | 41,673 |
openstack/proliantutils | proliantutils/ipa_hw_manager/hardware_manager.py | ProliantHardwareManager.erase_devices | def erase_devices(self, node, port):
"""Erase the drives on the bare metal.
This method erase all the drives which supports sanitize and the drives
which are not part of any logical volume on the bare metal. It calls
generic erase method after the success of Sanitize disk erase.
:param node: A dictionary of the node object.
:param port: A list of dictionaries containing information of ports
for the node.
:raises exception.HPSSAOperationError, if there is a failure on the
erase operation on the controllers.
:returns: The dictionary of controllers with the drives and erase
status for each drive.
"""
result = {}
result['Disk Erase Status'] = hpssa_manager.erase_devices()
result.update(super(ProliantHardwareManager,
self).erase_devices(node, port))
def erase_devices(self, node, port):
    """Erase the drives on the bare metal.

    Sanitize-erases every drive that supports it, plus drives that are
    not part of any logical volume, then chains to the generic erase of
    the parent hardware manager.

    :param node: A dictionary of the node object.
    :param port: A list of dictionaries containing information of ports
        for the node.
    :raises exception.HPSSAOperationError, if there is a failure on the
        erase operation on the controllers.
    :returns: The dictionary of controllers with the drives and erase
        status for each drive.
    """
    status = {'Disk Erase Status': hpssa_manager.erase_devices()}
    # Run the generic (base-class) erase after the sanitize erase and
    # merge its report into ours.
    generic_status = super(ProliantHardwareManager,
                           self).erase_devices(node, port)
    status.update(generic_status)
    return status
"def",
"erase_devices",
"(",
"self",
",",
"node",
",",
"port",
")",
":",
"result",
"=",
"{",
"}",
"result",
"[",
"'Disk Erase Status'",
"]",
"=",
"hpssa_manager",
".",
"erase_devices",
"(",
")",
"result",
".",
"update",
"(",
"super",
"(",
"ProliantHardware... | Erase the drives on the bare metal.
This method erase all the drives which supports sanitize and the drives
which are not part of any logical volume on the bare metal. It calls
generic erase method after the success of Sanitize disk erase.
:param node: A dictionary of the node object.
:param port: A list of dictionaries containing information of ports
for the node.
:raises exception.HPSSAOperationError, if there is a failure on the
erase operation on the controllers.
:returns: The dictionary of controllers with the drives and erase
status for each drive. | [
"Erase",
"the",
"drives",
"on",
"the",
"bare",
"metal",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ipa_hw_manager/hardware_manager.py#L91-L110 | train | 41,674 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.init_model_based_tags | def init_model_based_tags(self, model):
"""Initializing the model based memory and NIC information tags.
It should be called just after instantiating a RIBCL object.
ribcl = ribcl.RIBCLOperations(host, login, password, timeout,
port, cacert=cacert)
model = ribcl.get_product_name()
ribcl.init_model_based_tags(model)
Again, model attribute is also set here on the RIBCL object.
:param model: the model string
"""
self.model = model
if 'G7' in self.model:
self.MEMORY_SIZE_TAG = "MEMORY_SIZE"
self.MEMORY_SIZE_NOT_PRESENT_TAG = "Not Installed"
self.NIC_INFORMATION_TAG = "NIC_INFOMATION"
else:
self.MEMORY_SIZE_TAG = "TOTAL_MEMORY_SIZE"
self.MEMORY_SIZE_NOT_PRESENT_TAG = "N/A"
def init_model_based_tags(self, model):
    """Initialize the model-dependent memory and NIC information tags.

    Must be called right after instantiating a RIBCL object, e.g.::

        ribcl = ribcl.RIBCLOperations(host, login, password, timeout,
                                      port, cacert=cacert)
        model = ribcl.get_product_name()
        ribcl.init_model_based_tags(model)

    Also stores the model string on the object as ``self.model``.

    :param model: the model string
    """
    self.model = model
    # G7 firmware uses older tag names (note iLO's historical
    # "NIC_INFOMATION" spelling, which is intentional).
    if 'G7' in model:
        tags = ("MEMORY_SIZE", "Not Installed", "NIC_INFOMATION")
    else:
        tags = ("TOTAL_MEMORY_SIZE", "N/A", "NIC_INFORMATION")
    (self.MEMORY_SIZE_TAG,
     self.MEMORY_SIZE_NOT_PRESENT_TAG,
     self.NIC_INFORMATION_TAG) = tags
"def",
"init_model_based_tags",
"(",
"self",
",",
"model",
")",
":",
"self",
".",
"model",
"=",
"model",
"if",
"'G7'",
"in",
"self",
".",
"model",
":",
"self",
".",
"MEMORY_SIZE_TAG",
"=",
"\"MEMORY_SIZE\"",
"self",
".",
"MEMORY_SIZE_NOT_PRESENT_TAG",
"=",
"... | Initializing the model based memory and NIC information tags.
It should be called just after instantiating a RIBCL object.
ribcl = ribcl.RIBCLOperations(host, login, password, timeout,
port, cacert=cacert)
model = ribcl.get_product_name()
ribcl.init_model_based_tags(model)
Again, model attribute is also set here on the RIBCL object.
:param model: the model string | [
"Initializing",
"the",
"model",
"based",
"memory",
"and",
"NIC",
"information",
"tags",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L96-L118 | train | 41,675 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._request_ilo | def _request_ilo(self, root, extra_headers=None):
"""Send RIBCL XML data to iLO.
This function sends the XML request to the ILO and
receives the output from ILO.
:raises: IloConnectionError() if unable to send the request.
"""
if self.port:
urlstr = 'https://%s:%d/ribcl' % (self.host, self.port)
else:
urlstr = 'https://%s/ribcl' % (self.host)
xml = self._serialize_xml(root)
headers = {"Content-length": str(len(xml))}
if extra_headers:
headers.update(extra_headers)
kwargs = {'headers': headers, 'data': xml}
if self.cacert is not None:
kwargs['verify'] = self.cacert
else:
kwargs['verify'] = False
try:
LOG.debug(self._("POST %(url)s with request data: "
"%(request_data)s"),
{'url': urlstr,
'request_data': MaskedRequestData(kwargs)})
response = requests.post(urlstr, **kwargs)
response.raise_for_status()
except Exception as e:
LOG.debug(self._("Unable to connect to iLO. %s"), e)
raise exception.IloConnectionError(e)
def _request_ilo(self, root, extra_headers=None):
    """Send RIBCL XML data to iLO.

    Serializes the given XML tree, POSTs it to the iLO ``/ribcl``
    endpoint over HTTPS and hands back the raw response body.

    :param root: etree root element of the RIBCL request.
    :param extra_headers: optional dict of additional HTTP headers.
    :returns: the text of the HTTP response from iLO.
    :raises: IloConnectionError() if unable to send the request.
    """
    if self.port:
        target_url = 'https://%s:%d/ribcl' % (self.host, self.port)
    else:
        target_url = 'https://%s/ribcl' % (self.host)

    payload = self._serialize_xml(root)
    headers = {"Content-length": str(len(payload))}
    if extra_headers:
        headers.update(extra_headers)

    kwargs = {'headers': headers, 'data': payload}
    # Verify against the CA bundle when one was supplied, otherwise
    # skip TLS certificate verification entirely.
    kwargs['verify'] = self.cacert if self.cacert is not None else False

    try:
        # MaskedRequestData keeps credentials out of the debug log.
        LOG.debug(self._("POST %(url)s with request data: "
                         "%(request_data)s"),
                  {'url': target_url,
                   'request_data': MaskedRequestData(kwargs)})
        response = requests.post(target_url, **kwargs)
        response.raise_for_status()
    except Exception as e:
        LOG.debug(self._("Unable to connect to iLO. %s"), e)
        raise exception.IloConnectionError(e)
    return response.text
"def",
"_request_ilo",
"(",
"self",
",",
"root",
",",
"extra_headers",
"=",
"None",
")",
":",
"if",
"self",
".",
"port",
":",
"urlstr",
"=",
"'https://%s:%d/ribcl'",
"%",
"(",
"self",
".",
"host",
",",
"self",
".",
"port",
")",
"else",
":",
"urlstr",
... | Send RIBCL XML data to iLO.
This function sends the XML request to the ILO and
receives the output from ILO.
:raises: IloConnectionError() if unable to send the request. | [
"Send",
"RIBCL",
"XML",
"data",
"to",
"iLO",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L120-L153 | train | 41,676 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._create_dynamic_xml | def _create_dynamic_xml(self, cmdname, tag_name, mode, subelements=None):
"""Create RIBCL XML to send to iLO.
This function creates the dynamic xml required to be sent
to the ILO for all the APIs.
:param cmdname: the API which needs to be implemented.
:param tag_name: the tag info under which ILO has defined
the particular API.
:param mode: 'read' or 'write'
:param subelements: dictionary containing subelements of the
particular API tree.
:returns: the etree.Element for the root of the RIBCL XML
"""
root = etree.Element('RIBCL', VERSION="2.0")
login = etree.SubElement(
root, 'LOGIN', USER_LOGIN=self.login, PASSWORD=self.password)
tagname = etree.SubElement(login, tag_name, MODE=mode)
subelements = subelements or {}
etree.SubElement(tagname, cmdname)
if six.PY2:
root_iterator = root.getiterator(cmdname)
else:
root_iterator = root.iter(cmdname)
for cmd in root_iterator:
for key, value in subelements.items():
cmd.set(key, value)
def _create_dynamic_xml(self, cmdname, tag_name, mode, subelements=None):
    """Create RIBCL XML to send to iLO.

    Builds the request tree
    ``RIBCL -> LOGIN -> <tag_name MODE=...> -> <cmdname>`` and applies
    any given subelement attributes to the command element.

    :param cmdname: the API which needs to be implemented.
    :param tag_name: the tag info under which ILO has defined
        the particular API.
    :param mode: 'read' or 'write'
    :param subelements: dictionary containing subelements of the
        particular API tree.
    :returns: the etree.Element for the root of the RIBCL XML
    """
    root = etree.Element('RIBCL', VERSION="2.0")
    login = etree.SubElement(
        root, 'LOGIN', USER_LOGIN=self.login, PASSWORD=self.password)
    command_parent = etree.SubElement(login, tag_name, MODE=mode)
    etree.SubElement(command_parent, cmdname)

    attributes = subelements or {}
    # getiterator() is the deprecated py2/lxml spelling of iter().
    command_nodes = (root.getiterator(cmdname) if six.PY2
                     else root.iter(cmdname))
    for cmd in command_nodes:
        for attr_name, attr_value in attributes.items():
            cmd.set(attr_name, attr_value)
    return root
"def",
"_create_dynamic_xml",
"(",
"self",
",",
"cmdname",
",",
"tag_name",
",",
"mode",
",",
"subelements",
"=",
"None",
")",
":",
"root",
"=",
"etree",
".",
"Element",
"(",
"'RIBCL'",
",",
"VERSION",
"=",
"\"2.0\"",
")",
"login",
"=",
"etree",
".",
"... | Create RIBCL XML to send to iLO.
This function creates the dynamic xml required to be sent
to the ILO for all the APIs.
:param cmdname: the API which needs to be implemented.
:param tag_name: the tag info under which ILO has defined
the particular API.
:param mode: 'read' or 'write'
:param subelements: dictionary containing subelements of the
particular API tree.
:returns: the etree.Element for the root of the RIBCL XML | [
"Create",
"RIBCL",
"XML",
"to",
"send",
"to",
"iLO",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L155-L186 | train | 41,677 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._serialize_xml | def _serialize_xml(self, root):
"""Serialize XML data into string
It serializes the dynamic xml created and converts
it to a string. This is done before sending the
xml to the ILO.
:param root: root of the dynamic xml.
"""
if hasattr(etree, 'tostringlist'):
if six.PY3:
xml_content_list = [
x.decode("utf-8") for x in etree.tostringlist(root)]
else:
xml_content_list = etree.tostringlist(root)
xml = '\r\n'.join(xml_content_list) + '\r\n'
else:
if six.PY3:
xml_content = etree.tostring(root).decode("utf-8")
else:
xml_content = etree.tostring(root)
xml = xml_content + '\r\n'
def _serialize_xml(self, root):
    """Serialize XML data into string

    Converts the dynamically-built request tree into the string that
    gets POSTed to the iLO, joining the serialized fragments with
    CRLF line endings.

    :param root: root of the dynamic xml.
    :returns: the serialized XML as a (unicode) string.
    """
    needs_decode = six.PY3

    if hasattr(etree, 'tostringlist'):
        fragments = etree.tostringlist(root)
        if needs_decode:
            fragments = [frag.decode("utf-8") for frag in fragments]
        return '\r\n'.join(fragments) + '\r\n'

    serialized = etree.tostring(root)
    if needs_decode:
        serialized = serialized.decode("utf-8")
    return serialized + '\r\n'
"def",
"_serialize_xml",
"(",
"self",
",",
"root",
")",
":",
"if",
"hasattr",
"(",
"etree",
",",
"'tostringlist'",
")",
":",
"if",
"six",
".",
"PY3",
":",
"xml_content_list",
"=",
"[",
"x",
".",
"decode",
"(",
"\"utf-8\"",
")",
"for",
"x",
"in",
"etr... | Serialize XML data into string
It serializes the dynamic xml created and converts
it to a string. This is done before sending the
xml to the ILO.
:param root: root of the dynamic xml. | [
"Serialize",
"XML",
"data",
"into",
"string"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L188-L211 | train | 41,678 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._parse_output | def _parse_output(self, xml_response):
"""Parse the response XML from iLO.
This function parses the output received from ILO.
As the output contains multiple XMLs, it extracts
one xml at a time and loops over till all the xmls
in the response are exhausted.
It returns the data to APIs either in dictionary
format or as the string.
It creates the dictionary only if the Ilo response
contains the data under the requested RIBCL command.
If the Ilo response contains only the string,
then the string is returned back.
"""
count = 0
xml_dict = {}
resp_message = None
xml_start_pos = []
for m in re.finditer(r"\<\?xml", xml_response):
xml_start_pos.append(m.start())
while count < len(xml_start_pos):
if (count == len(xml_start_pos) - 1):
result = xml_response[xml_start_pos[count]:]
else:
start = xml_start_pos[count]
end = xml_start_pos[count + 1]
result = xml_response[start:end]
result = result.strip()
message = etree.fromstring(result)
resp = self._validate_message(message)
if hasattr(resp, 'tag'):
xml_dict = self._elementtree_to_dict(resp)
elif resp is not None:
resp_message = resp
count = count + 1
if xml_dict:
return xml_dict
elif resp_message is not None:
def _parse_output(self, xml_response):
    """Parse the response XML from iLO.

    This function parses the output received from ILO.
    As the output contains multiple XMLs, it extracts
    one xml at a time and loops over till all the xmls
    in the response are exhausted.

    It returns the data to APIs either in dictionary
    format or as the string.

    It creates the dictionary only if the Ilo response
    contains the data under the requested RIBCL command.

    If the Ilo response contains only the string,
    then the string is returned back.

    :param xml_response: raw response body from iLO, possibly a
        concatenation of several XML documents.
    :returns: a dict built from the last response element carrying
        data, else the last plain message string, else None (implicit)
        when neither was present.
    """
    count = 0
    xml_dict = {}
    resp_message = None
    # Record the start offset of every '<?xml' declaration so the
    # concatenated response can be split into individual documents.
    xml_start_pos = []
    for m in re.finditer(r"\<\?xml", xml_response):
        xml_start_pos.append(m.start())
    while count < len(xml_start_pos):
        if (count == len(xml_start_pos) - 1):
            # Last document: runs to the end of the response string.
            result = xml_response[xml_start_pos[count]:]
        else:
            # Interior document: bounded by the next declaration.
            start = xml_start_pos[count]
            end = xml_start_pos[count + 1]
            result = xml_response[start:end]
        result = result.strip()
        message = etree.fromstring(result)
        # _validate_message raises on error status; otherwise it hands
        # back either an XML element (payload) or a message string.
        resp = self._validate_message(message)
        if hasattr(resp, 'tag'):
            xml_dict = self._elementtree_to_dict(resp)
        elif resp is not None:
            resp_message = resp
        count = count + 1
    # Prefer structured data over a bare message string.
    if xml_dict:
        return xml_dict
    elif resp_message is not None:
        return resp_message
"def",
"_parse_output",
"(",
"self",
",",
"xml_response",
")",
":",
"count",
"=",
"0",
"xml_dict",
"=",
"{",
"}",
"resp_message",
"=",
"None",
"xml_start_pos",
"=",
"[",
"]",
"for",
"m",
"in",
"re",
".",
"finditer",
"(",
"r\"\\<\\?xml\"",
",",
"xml_respo... | Parse the response XML from iLO.
This function parses the output received from ILO.
As the output contains multiple XMLs, it extracts
one xml at a time and loops over till all the xmls
in the response are exhausted.
It returns the data to APIs either in dictionary
format or as the string.
It creates the dictionary only if the Ilo response
contains the data under the requested RIBCL command.
If the Ilo response contains only the string,
then the string is returned back. | [
"Parse",
"the",
"response",
"XML",
"from",
"iLO",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L213-L253 | train | 41,679 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._elementtree_to_dict | def _elementtree_to_dict(self, element):
"""Convert XML elementtree to dictionary.
Converts the actual response from the ILO for an API
to the dictionary.
"""
node = {}
text = getattr(element, 'text')
if text is not None:
text = text.strip()
if len(text) != 0:
node['text'] = text
node.update(element.items()) # element's attributes
child_nodes = {}
for child in element: # element's children
child_nodes.setdefault(child.tag, []).append(
self._elementtree_to_dict(child))
# convert all single-element lists into non-lists
for key, value in child_nodes.items():
if len(value) == 1:
child_nodes[key] = value[0]
node.update(child_nodes.items())
return node | python | def _elementtree_to_dict(self, element):
"""Convert XML elementtree to dictionary.
Converts the actual response from the ILO for an API
to the dictionary.
"""
node = {}
text = getattr(element, 'text')
if text is not None:
text = text.strip()
if len(text) != 0:
node['text'] = text
node.update(element.items()) # element's attributes
child_nodes = {}
for child in element: # element's children
child_nodes.setdefault(child.tag, []).append(
self._elementtree_to_dict(child))
# convert all single-element lists into non-lists
for key, value in child_nodes.items():
if len(value) == 1:
child_nodes[key] = value[0]
node.update(child_nodes.items())
return node | [
"def",
"_elementtree_to_dict",
"(",
"self",
",",
"element",
")",
":",
"node",
"=",
"{",
"}",
"text",
"=",
"getattr",
"(",
"element",
",",
"'text'",
")",
"if",
"text",
"is",
"not",
"None",
":",
"text",
"=",
"text",
".",
"strip",
"(",
")",
"if",
"len... | Convert XML elementtree to dictionary.
Converts the actual response from the ILO for an API
to the dictionary. | [
"Convert",
"XML",
"elementtree",
"to",
"dictionary",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L255-L277 | train | 41,680 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._validate_message | def _validate_message(self, message):
"""Validate XML response from iLO.
This function validates the XML response to see
if the exit status is 0 or not in the response.
If the status is non-zero it raises exception.
"""
if message.tag != 'RIBCL':
# the true case shall be unreachable for response
# XML from Ilo as all messages are tagged with RIBCL
# but still raise an exception if any invalid
# XML response is returned by Ilo. Set status to some
# arbitrary non-zero value.
status = -1
raise exception.IloClientInternalError(message, status)
for child in message:
if child.tag != 'RESPONSE':
return message
status = int(child.get('STATUS'), 16)
msg = child.get('MESSAGE')
if status == 0 and msg != 'No error':
return msg
if status != 0:
if 'syntax error' in msg or 'Feature not supported' in msg:
for cmd in BOOT_MODE_CMDS:
if cmd in msg:
platform = self.get_product_name()
msg = ("%(cmd)s is not supported on %(platform)s" %
{'cmd': cmd, 'platform': platform})
LOG.debug(self._("Got invalid response with "
"message: '%(message)s'"),
{'message': msg})
raise (exception.IloCommandNotSupportedError
(msg, status))
else:
LOG.debug(self._("Got invalid response with "
"message: '%(message)s'"),
{'message': msg})
raise exception.IloClientInternalError(msg, status)
if (status in exception.IloLoginFailError.statuses or
msg in exception.IloLoginFailError.messages):
LOG.debug(self._("Got invalid response with "
"message: '%(message)s'"),
{'message': msg})
raise exception.IloLoginFailError(msg, status)
LOG.debug(self._("Got invalid response with "
"message: '%(message)s'"),
{'message': msg})
def _validate_message(self, message):
    """Validate XML response from iLO.

    This function validates the XML response to see
    if the exit status is 0 or not in the response.
    If the status is non-zero it raises exception.

    :param message: parsed etree root of one response XML document.
    :returns: the message element itself when it carries payload data,
        or the response message string when status is 0 with a
        non-default message.
    :raises: IloClientInternalError, IloCommandNotSupportedError,
        IloLoginFailError or IloError depending on the reported status
        and message text.
    """
    if message.tag != 'RIBCL':
        # the true case shall be unreachable for response
        # XML from Ilo as all messages are tagged with RIBCL
        # but still raise an exception if any invalid
        # XML response is returned by Ilo. Set status to some
        # arbitrary non-zero value.
        status = -1
        raise exception.IloClientInternalError(message, status)

    for child in message:
        if child.tag != 'RESPONSE':
            # Payload element (not a status wrapper): hand the whole
            # message back for conversion into a dict by the caller.
            return message
        # STATUS is a hexadecimal string; 0 means success.
        status = int(child.get('STATUS'), 16)
        msg = child.get('MESSAGE')
        if status == 0 and msg != 'No error':
            return msg
        if status != 0:
            if 'syntax error' in msg or 'Feature not supported' in msg:
                for cmd in BOOT_MODE_CMDS:
                    if cmd in msg:
                        # Boot-mode command rejected: report it as an
                        # unsupported command on this platform.
                        platform = self.get_product_name()
                        msg = ("%(cmd)s is not supported on %(platform)s" %
                               {'cmd': cmd, 'platform': platform})
                        LOG.debug(self._("Got invalid response with "
                                         "message: '%(message)s'"),
                                  {'message': msg})
                        raise (exception.IloCommandNotSupportedError
                               (msg, status))
                else:
                    # for/else: no boot-mode command matched the
                    # message, so treat it as a generic internal error.
                    LOG.debug(self._("Got invalid response with "
                                     "message: '%(message)s'"),
                              {'message': msg})
                    raise exception.IloClientInternalError(msg, status)
            if (status in exception.IloLoginFailError.statuses or
                    msg in exception.IloLoginFailError.messages):
                LOG.debug(self._("Got invalid response with "
                                 "message: '%(message)s'"),
                          {'message': msg})
                raise exception.IloLoginFailError(msg, status)
            LOG.debug(self._("Got invalid response with "
                             "message: '%(message)s'"),
                      {'message': msg})
            raise exception.IloError(msg, status)
"def",
"_validate_message",
"(",
"self",
",",
"message",
")",
":",
"if",
"message",
".",
"tag",
"!=",
"'RIBCL'",
":",
"# the true case shall be unreachable for response",
"# XML from Ilo as all messages are tagged with RIBCL",
"# but still raise an exception if any invalid",
"# X... | Validate XML response from iLO.
This function validates the XML response to see
if the exit status is 0 or not in the response.
If the status is non-zero it raises exception. | [
"Validate",
"XML",
"response",
"from",
"iLO",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L279-L329 | train | 41,681 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._execute_command | def _execute_command(self, create_command, tag_info, mode, dic={}):
"""Execute a command on the iLO.
Common infrastructure used by all APIs to send/get
response from ILO.
"""
xml = self._create_dynamic_xml(
create_command, tag_info, mode, dic)
d = self._request_ilo(xml)
data = self._parse_output(d)
LOG.debug(self._("Received response data: %s"), data)
return data | python | def _execute_command(self, create_command, tag_info, mode, dic={}):
"""Execute a command on the iLO.
Common infrastructure used by all APIs to send/get
response from ILO.
"""
xml = self._create_dynamic_xml(
create_command, tag_info, mode, dic)
d = self._request_ilo(xml)
data = self._parse_output(d)
LOG.debug(self._("Received response data: %s"), data)
return data | [
"def",
"_execute_command",
"(",
"self",
",",
"create_command",
",",
"tag_info",
",",
"mode",
",",
"dic",
"=",
"{",
"}",
")",
":",
"xml",
"=",
"self",
".",
"_create_dynamic_xml",
"(",
"create_command",
",",
"tag_info",
",",
"mode",
",",
"dic",
")",
"d",
... | Execute a command on the iLO.
Common infrastructure used by all APIs to send/get
response from ILO. | [
"Execute",
"a",
"command",
"on",
"the",
"iLO",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L331-L342 | train | 41,682 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.get_all_licenses | def get_all_licenses(self):
"""Retrieve license type, key, installation date, etc."""
data = self._execute_command('GET_ALL_LICENSES', 'RIB_INFO', 'read')
d = {}
for key, val in data['GET_ALL_LICENSES']['LICENSE'].items():
if isinstance(val, dict):
d[key] = data['GET_ALL_LICENSES']['LICENSE'][key]['VALUE']
return d | python | def get_all_licenses(self):
"""Retrieve license type, key, installation date, etc."""
data = self._execute_command('GET_ALL_LICENSES', 'RIB_INFO', 'read')
d = {}
for key, val in data['GET_ALL_LICENSES']['LICENSE'].items():
if isinstance(val, dict):
d[key] = data['GET_ALL_LICENSES']['LICENSE'][key]['VALUE']
return d | [
"def",
"get_all_licenses",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"_execute_command",
"(",
"'GET_ALL_LICENSES'",
",",
"'RIB_INFO'",
",",
"'read'",
")",
"d",
"=",
"{",
"}",
"for",
"key",
",",
"val",
"in",
"data",
"[",
"'GET_ALL_LICENSES'",
"]",
... | Retrieve license type, key, installation date, etc. | [
"Retrieve",
"license",
"type",
"key",
"installation",
"date",
"etc",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L344-L351 | train | 41,683 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.set_pending_boot_mode | def set_pending_boot_mode(self, value):
"""Configures the boot mode of the system from a specific boot mode."""
dic = {'value': value}
data = self._execute_command(
'SET_PENDING_BOOT_MODE', 'SERVER_INFO', 'write', dic)
return data | python | def set_pending_boot_mode(self, value):
"""Configures the boot mode of the system from a specific boot mode."""
dic = {'value': value}
data = self._execute_command(
'SET_PENDING_BOOT_MODE', 'SERVER_INFO', 'write', dic)
return data | [
"def",
"set_pending_boot_mode",
"(",
"self",
",",
"value",
")",
":",
"dic",
"=",
"{",
"'value'",
":",
"value",
"}",
"data",
"=",
"self",
".",
"_execute_command",
"(",
"'SET_PENDING_BOOT_MODE'",
",",
"'SERVER_INFO'",
",",
"'write'",
",",
"dic",
")",
"return",... | Configures the boot mode of the system from a specific boot mode. | [
"Configures",
"the",
"boot",
"mode",
"of",
"the",
"system",
"from",
"a",
"specific",
"boot",
"mode",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L495-L500 | train | 41,684 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.get_persistent_boot_device | def get_persistent_boot_device(self):
"""Get the current persistent boot device set for the host."""
result = self._get_persistent_boot()
boot_mode = self._check_boot_mode(result)
if boot_mode == 'bios':
return result[0]['value']
value = result[0]['DESCRIPTION']
if 'HP iLO Virtual USB CD' in value:
return 'CDROM'
elif 'NIC' in value or 'PXE' in value:
return 'NETWORK'
elif common.isDisk(value):
return 'HDD'
else:
return None | python | def get_persistent_boot_device(self):
"""Get the current persistent boot device set for the host."""
result = self._get_persistent_boot()
boot_mode = self._check_boot_mode(result)
if boot_mode == 'bios':
return result[0]['value']
value = result[0]['DESCRIPTION']
if 'HP iLO Virtual USB CD' in value:
return 'CDROM'
elif 'NIC' in value or 'PXE' in value:
return 'NETWORK'
elif common.isDisk(value):
return 'HDD'
else:
return None | [
"def",
"get_persistent_boot_device",
"(",
"self",
")",
":",
"result",
"=",
"self",
".",
"_get_persistent_boot",
"(",
")",
"boot_mode",
"=",
"self",
".",
"_check_boot_mode",
"(",
"result",
")",
"if",
"boot_mode",
"==",
"'bios'",
":",
"return",
"result",
"[",
... | Get the current persistent boot device set for the host. | [
"Get",
"the",
"current",
"persistent",
"boot",
"device",
"set",
"for",
"the",
"host",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L509-L528 | train | 41,685 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._set_persistent_boot | def _set_persistent_boot(self, values=[]):
"""Configures a boot from a specific device."""
xml = self._create_dynamic_xml(
'SET_PERSISTENT_BOOT', 'SERVER_INFO', 'write')
if six.PY2:
child_iterator = xml.getiterator()
else:
child_iterator = xml.iter()
for child in child_iterator:
for val in values:
if child.tag == 'SET_PERSISTENT_BOOT':
etree.SubElement(child, 'DEVICE', VALUE=val)
d = self._request_ilo(xml)
data = self._parse_output(d)
return data | python | def _set_persistent_boot(self, values=[]):
"""Configures a boot from a specific device."""
xml = self._create_dynamic_xml(
'SET_PERSISTENT_BOOT', 'SERVER_INFO', 'write')
if six.PY2:
child_iterator = xml.getiterator()
else:
child_iterator = xml.iter()
for child in child_iterator:
for val in values:
if child.tag == 'SET_PERSISTENT_BOOT':
etree.SubElement(child, 'DEVICE', VALUE=val)
d = self._request_ilo(xml)
data = self._parse_output(d)
return data | [
"def",
"_set_persistent_boot",
"(",
"self",
",",
"values",
"=",
"[",
"]",
")",
":",
"xml",
"=",
"self",
".",
"_create_dynamic_xml",
"(",
"'SET_PERSISTENT_BOOT'",
",",
"'SERVER_INFO'",
",",
"'write'",
")",
"if",
"six",
".",
"PY2",
":",
"child_iterator",
"=",
... | Configures a boot from a specific device. | [
"Configures",
"a",
"boot",
"from",
"a",
"specific",
"device",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L530-L547 | train | 41,686 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._request_host | def _request_host(self):
"""Request host info from the server."""
urlstr = 'https://%s/xmldata?item=all' % (self.host)
kwargs = {}
if self.cacert is not None:
kwargs['verify'] = self.cacert
else:
kwargs['verify'] = False
try:
response = requests.get(urlstr, **kwargs)
response.raise_for_status()
except Exception as e:
raise IloConnectionError(e)
return response.text | python | def _request_host(self):
"""Request host info from the server."""
urlstr = 'https://%s/xmldata?item=all' % (self.host)
kwargs = {}
if self.cacert is not None:
kwargs['verify'] = self.cacert
else:
kwargs['verify'] = False
try:
response = requests.get(urlstr, **kwargs)
response.raise_for_status()
except Exception as e:
raise IloConnectionError(e)
return response.text | [
"def",
"_request_host",
"(",
"self",
")",
":",
"urlstr",
"=",
"'https://%s/xmldata?item=all'",
"%",
"(",
"self",
".",
"host",
")",
"kwargs",
"=",
"{",
"}",
"if",
"self",
".",
"cacert",
"is",
"not",
"None",
":",
"kwargs",
"[",
"'verify'",
"]",
"=",
"sel... | Request host info from the server. | [
"Request",
"host",
"info",
"from",
"the",
"server",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L630-L644 | train | 41,687 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.get_host_uuid | def get_host_uuid(self):
"""Request host UUID of the server.
:returns: the host UUID of the server
:raises: IloConnectionError if failed connecting to the iLO.
"""
xml = self._request_host()
root = etree.fromstring(xml)
data = self._elementtree_to_dict(root)
return data['HSI']['SPN']['text'], data['HSI']['cUUID']['text'] | python | def get_host_uuid(self):
"""Request host UUID of the server.
:returns: the host UUID of the server
:raises: IloConnectionError if failed connecting to the iLO.
"""
xml = self._request_host()
root = etree.fromstring(xml)
data = self._elementtree_to_dict(root)
return data['HSI']['SPN']['text'], data['HSI']['cUUID']['text'] | [
"def",
"get_host_uuid",
"(",
"self",
")",
":",
"xml",
"=",
"self",
".",
"_request_host",
"(",
")",
"root",
"=",
"etree",
".",
"fromstring",
"(",
"xml",
")",
"data",
"=",
"self",
".",
"_elementtree_to_dict",
"(",
"root",
")",
"return",
"data",
"[",
"'HS... | Request host UUID of the server.
:returns: the host UUID of the server
:raises: IloConnectionError if failed connecting to the iLO. | [
"Request",
"host",
"UUID",
"of",
"the",
"server",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L646-L655 | train | 41,688 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.get_host_health_data | def get_host_health_data(self, data=None):
"""Request host health data of the server.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the embedded health data.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO.
"""
if not data or data and "GET_EMBEDDED_HEALTH_DATA" not in data:
data = self._execute_command(
'GET_EMBEDDED_HEALTH', 'SERVER_INFO', 'read')
return data | python | def get_host_health_data(self, data=None):
"""Request host health data of the server.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the embedded health data.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO.
"""
if not data or data and "GET_EMBEDDED_HEALTH_DATA" not in data:
data = self._execute_command(
'GET_EMBEDDED_HEALTH', 'SERVER_INFO', 'read')
return data | [
"def",
"get_host_health_data",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"if",
"not",
"data",
"or",
"data",
"and",
"\"GET_EMBEDDED_HEALTH_DATA\"",
"not",
"in",
"data",
":",
"data",
"=",
"self",
".",
"_execute_command",
"(",
"'GET_EMBEDDED_HEALTH'",
",",... | Request host health data of the server.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the embedded health data.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO. | [
"Request",
"host",
"health",
"data",
"of",
"the",
"server",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L657-L668 | train | 41,689 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.get_host_health_power_supplies | def get_host_health_power_supplies(self, data=None):
"""Request the health power supply information.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the power supply information.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO.
"""
data = self.get_host_health_data(data)
d = (data['GET_EMBEDDED_HEALTH_DATA']['POWER_SUPPLIES']['SUPPLY'])
if not isinstance(d, list):
d = [d]
return d | python | def get_host_health_power_supplies(self, data=None):
"""Request the health power supply information.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the power supply information.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO.
"""
data = self.get_host_health_data(data)
d = (data['GET_EMBEDDED_HEALTH_DATA']['POWER_SUPPLIES']['SUPPLY'])
if not isinstance(d, list):
d = [d]
return d | [
"def",
"get_host_health_power_supplies",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"data",
"=",
"self",
".",
"get_host_health_data",
"(",
"data",
")",
"d",
"=",
"(",
"data",
"[",
"'GET_EMBEDDED_HEALTH_DATA'",
"]",
"[",
"'POWER_SUPPLIES'",
"]",
"[",
"'... | Request the health power supply information.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the power supply information.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO. | [
"Request",
"the",
"health",
"power",
"supply",
"information",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L683-L695 | train | 41,690 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.get_host_health_temperature_sensors | def get_host_health_temperature_sensors(self, data=None):
"""Get the health Temp Sensor report.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the temperature sensors
information.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO.
"""
data = self.get_host_health_data(data)
d = data['GET_EMBEDDED_HEALTH_DATA']['TEMPERATURE']['TEMP']
if not isinstance(d, list):
d = [d]
return d | python | def get_host_health_temperature_sensors(self, data=None):
"""Get the health Temp Sensor report.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the temperature sensors
information.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO.
"""
data = self.get_host_health_data(data)
d = data['GET_EMBEDDED_HEALTH_DATA']['TEMPERATURE']['TEMP']
if not isinstance(d, list):
d = [d]
return d | [
"def",
"get_host_health_temperature_sensors",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"data",
"=",
"self",
".",
"get_host_health_data",
"(",
"data",
")",
"d",
"=",
"data",
"[",
"'GET_EMBEDDED_HEALTH_DATA'",
"]",
"[",
"'TEMPERATURE'",
"]",
"[",
"'TEMP'... | Get the health Temp Sensor report.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the temperature sensors
information.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO. | [
"Get",
"the",
"health",
"Temp",
"Sensor",
"report",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L697-L710 | train | 41,691 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.get_host_health_fan_sensors | def get_host_health_fan_sensors(self, data=None):
"""Get the health Fan Sensor Report.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the fan sensor information.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO.
"""
data = self.get_host_health_data(data)
d = data['GET_EMBEDDED_HEALTH_DATA']['FANS']['FAN']
if not isinstance(d, list):
d = [d]
return d | python | def get_host_health_fan_sensors(self, data=None):
"""Get the health Fan Sensor Report.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the fan sensor information.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO.
"""
data = self.get_host_health_data(data)
d = data['GET_EMBEDDED_HEALTH_DATA']['FANS']['FAN']
if not isinstance(d, list):
d = [d]
return d | [
"def",
"get_host_health_fan_sensors",
"(",
"self",
",",
"data",
"=",
"None",
")",
":",
"data",
"=",
"self",
".",
"get_host_health_data",
"(",
"data",
")",
"d",
"=",
"data",
"[",
"'GET_EMBEDDED_HEALTH_DATA'",
"]",
"[",
"'FANS'",
"]",
"[",
"'FAN'",
"]",
"if"... | Get the health Fan Sensor Report.
:param: the data to retrieve from the server, defaults to None.
:returns: the dictionary containing the fan sensor information.
:raises: IloConnectionError if failed connecting to the iLO.
:raises: IloError, on an error from iLO. | [
"Get",
"the",
"health",
"Fan",
"Sensor",
"Report",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L712-L724 | train | 41,692 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.get_essential_properties | def get_essential_properties(self):
"""Gets essential scheduling properties as required by ironic
:returns: a dictionary of server properties like memory size,
disk size, number of cpus, cpu arch, port numbers
and mac addresses.
:raises:IloError if iLO returns an error in command execution.
"""
data = self.get_host_health_data()
properties = {
'memory_mb': self._parse_memory_embedded_health(data)
}
cpus, cpu_arch = self._parse_processor_embedded_health(data)
properties['cpus'] = cpus
properties['cpu_arch'] = cpu_arch
properties['local_gb'] = self._parse_storage_embedded_health(data)
macs = self._parse_nics_embedded_health(data)
return_value = {'properties': properties, 'macs': macs}
return return_value | python | def get_essential_properties(self):
"""Gets essential scheduling properties as required by ironic
:returns: a dictionary of server properties like memory size,
disk size, number of cpus, cpu arch, port numbers
and mac addresses.
:raises:IloError if iLO returns an error in command execution.
"""
data = self.get_host_health_data()
properties = {
'memory_mb': self._parse_memory_embedded_health(data)
}
cpus, cpu_arch = self._parse_processor_embedded_health(data)
properties['cpus'] = cpus
properties['cpu_arch'] = cpu_arch
properties['local_gb'] = self._parse_storage_embedded_health(data)
macs = self._parse_nics_embedded_health(data)
return_value = {'properties': properties, 'macs': macs}
return return_value | [
"def",
"get_essential_properties",
"(",
"self",
")",
":",
"data",
"=",
"self",
".",
"get_host_health_data",
"(",
")",
"properties",
"=",
"{",
"'memory_mb'",
":",
"self",
".",
"_parse_memory_embedded_health",
"(",
"data",
")",
"}",
"cpus",
",",
"cpu_arch",
"=",... | Gets essential scheduling properties as required by ironic
:returns: a dictionary of server properties like memory size,
disk size, number of cpus, cpu arch, port numbers
and mac addresses.
:raises:IloError if iLO returns an error in command execution. | [
"Gets",
"essential",
"scheduling",
"properties",
"as",
"required",
"by",
"ironic"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L788-L807 | train | 41,693 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._parse_storage_embedded_health | def _parse_storage_embedded_health(self, data):
"""Gets the storage data from get_embedded_health
Parse the get_host_health_data() for essential properties
:param data: the output returned by get_host_health_data()
:returns: disk size in GB.
"""
local_gb = 0
storage = self.get_value_as_list(data['GET_EMBEDDED_HEALTH_DATA'],
'STORAGE')
if storage is None:
# We dont raise exception because this dictionary
# is available only when RAID is configured.
# If we raise error here then we will always fail
# inspection where this module is consumed. Hence
# as a workaround just return 0.
return local_gb
minimum = local_gb
for item in storage:
cntlr = self.get_value_as_list(item, 'CONTROLLER')
if cntlr is None:
continue
for s in cntlr:
drive = self.get_value_as_list(s, 'LOGICAL_DRIVE')
if drive is None:
continue
for item in drive:
for key, val in item.items():
if key == 'CAPACITY':
capacity = val['VALUE']
local_bytes = (strutils.string_to_bytes(
capacity.replace(' ', ''),
return_int=True))
local_gb = int(local_bytes / (1024 * 1024 * 1024))
if minimum >= local_gb or minimum == 0:
minimum = local_gb
# Return disk size 1 less than the actual disk size. This prevents
# the deploy to fail from Nova when root_gb is same as local_gb
# in Ironic. When the disk size is used as root_device hints,
# then it should be given as the actual size i.e.
# ironic (node.properties['local_gb'] + 1) else root device
# hint will fail.
if minimum:
minimum = minimum - 1
return minimum | python | def _parse_storage_embedded_health(self, data):
"""Gets the storage data from get_embedded_health
Parse the get_host_health_data() for essential properties
:param data: the output returned by get_host_health_data()
:returns: disk size in GB.
"""
local_gb = 0
storage = self.get_value_as_list(data['GET_EMBEDDED_HEALTH_DATA'],
'STORAGE')
if storage is None:
# We dont raise exception because this dictionary
# is available only when RAID is configured.
# If we raise error here then we will always fail
# inspection where this module is consumed. Hence
# as a workaround just return 0.
return local_gb
minimum = local_gb
for item in storage:
cntlr = self.get_value_as_list(item, 'CONTROLLER')
if cntlr is None:
continue
for s in cntlr:
drive = self.get_value_as_list(s, 'LOGICAL_DRIVE')
if drive is None:
continue
for item in drive:
for key, val in item.items():
if key == 'CAPACITY':
capacity = val['VALUE']
local_bytes = (strutils.string_to_bytes(
capacity.replace(' ', ''),
return_int=True))
local_gb = int(local_bytes / (1024 * 1024 * 1024))
if minimum >= local_gb or minimum == 0:
minimum = local_gb
# Return disk size 1 less than the actual disk size. This prevents
# the deploy to fail from Nova when root_gb is same as local_gb
# in Ironic. When the disk size is used as root_device hints,
# then it should be given as the actual size i.e.
# ironic (node.properties['local_gb'] + 1) else root device
# hint will fail.
if minimum:
minimum = minimum - 1
return minimum | [
"def",
"_parse_storage_embedded_health",
"(",
"self",
",",
"data",
")",
":",
"local_gb",
"=",
"0",
"storage",
"=",
"self",
".",
"get_value_as_list",
"(",
"data",
"[",
"'GET_EMBEDDED_HEALTH_DATA'",
"]",
",",
"'STORAGE'",
")",
"if",
"storage",
"is",
"None",
":",... | Gets the storage data from get_embedded_health
Parse the get_host_health_data() for essential properties
:param data: the output returned by get_host_health_data()
:returns: disk size in GB. | [
"Gets",
"the",
"storage",
"data",
"from",
"get_embedded_health"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L889-L938 | train | 41,694 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations.get_value_as_list | def get_value_as_list(self, dictionary, key):
"""Helper function to check and convert a value to list.
Helper function to check and convert a value to json list.
This helps the ribcl data to be generalized across the servers.
:param dictionary: a dictionary to check in if key is present.
:param key: key to be checked if thats present in the given dictionary.
:returns the data converted to a list.
"""
if key not in dictionary:
return None
value = dictionary[key]
if not isinstance(value, list):
return [value]
else:
return value | python | def get_value_as_list(self, dictionary, key):
"""Helper function to check and convert a value to list.
Helper function to check and convert a value to json list.
This helps the ribcl data to be generalized across the servers.
:param dictionary: a dictionary to check in if key is present.
:param key: key to be checked if thats present in the given dictionary.
:returns the data converted to a list.
"""
if key not in dictionary:
return None
value = dictionary[key]
if not isinstance(value, list):
return [value]
else:
return value | [
"def",
"get_value_as_list",
"(",
"self",
",",
"dictionary",
",",
"key",
")",
":",
"if",
"key",
"not",
"in",
"dictionary",
":",
"return",
"None",
"value",
"=",
"dictionary",
"[",
"key",
"]",
"if",
"not",
"isinstance",
"(",
"value",
",",
"list",
")",
":"... | Helper function to check and convert a value to list.
Helper function to check and convert a value to json list.
This helps the ribcl data to be generalized across the servers.
:param dictionary: a dictionary to check in if key is present.
:param key: key to be checked if thats present in the given dictionary.
:returns the data converted to a list. | [
"Helper",
"function",
"to",
"check",
"and",
"convert",
"a",
"value",
"to",
"list",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L940-L957 | train | 41,695 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._parse_nics_embedded_health | def _parse_nics_embedded_health(self, data):
"""Gets the NIC details from get_embedded_health data
Parse the get_host_health_data() for essential properties
:param data: the output returned by get_host_health_data()
:returns: a dictionary of port numbers and their corresponding
mac addresses.
:raises IloError, if unable to get NIC data.
"""
nic_data = self.get_value_as_list((data['GET_EMBEDDED_HEALTH_DATA']
[self.NIC_INFORMATION_TAG]), 'NIC')
if nic_data is None:
msg = "Unable to get NIC details. Data missing"
raise exception.IloError(msg)
nic_dict = {}
for item in nic_data:
try:
port = item['NETWORK_PORT']['VALUE']
mac = item['MAC_ADDRESS']['VALUE']
self._update_nic_data_from_nic_info_based_on_model(nic_dict,
item, port,
mac)
except KeyError:
msg = "Unable to get NIC details. Data missing"
raise exception.IloError(msg)
return nic_dict | python | def _parse_nics_embedded_health(self, data):
"""Gets the NIC details from get_embedded_health data
Parse the get_host_health_data() for essential properties
:param data: the output returned by get_host_health_data()
:returns: a dictionary of port numbers and their corresponding
mac addresses.
:raises IloError, if unable to get NIC data.
"""
nic_data = self.get_value_as_list((data['GET_EMBEDDED_HEALTH_DATA']
[self.NIC_INFORMATION_TAG]), 'NIC')
if nic_data is None:
msg = "Unable to get NIC details. Data missing"
raise exception.IloError(msg)
nic_dict = {}
for item in nic_data:
try:
port = item['NETWORK_PORT']['VALUE']
mac = item['MAC_ADDRESS']['VALUE']
self._update_nic_data_from_nic_info_based_on_model(nic_dict,
item, port,
mac)
except KeyError:
msg = "Unable to get NIC details. Data missing"
raise exception.IloError(msg)
return nic_dict | [
"def",
"_parse_nics_embedded_health",
"(",
"self",
",",
"data",
")",
":",
"nic_data",
"=",
"self",
".",
"get_value_as_list",
"(",
"(",
"data",
"[",
"'GET_EMBEDDED_HEALTH_DATA'",
"]",
"[",
"self",
".",
"NIC_INFORMATION_TAG",
"]",
")",
",",
"'NIC'",
")",
"if",
... | Gets the NIC details from get_embedded_health data
Parse the get_host_health_data() for essential properties
:param data: the output returned by get_host_health_data()
:returns: a dictionary of port numbers and their corresponding
mac addresses.
:raises IloError, if unable to get NIC data. | [
"Gets",
"the",
"NIC",
"details",
"from",
"get_embedded_health",
"data"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L959-L989 | train | 41,696 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._get_rom_firmware_version | def _get_rom_firmware_version(self, data):
"""Gets the rom firmware version for server capabilities
Parse the get_host_health_data() to retreive the firmware
details.
:param data: the output returned by get_host_health_data()
:returns: a dictionary of rom firmware version.
"""
firmware_details = self._get_firmware_embedded_health(data)
if firmware_details:
try:
rom_firmware_version = (
firmware_details['HP ProLiant System ROM'])
return {'rom_firmware_version': rom_firmware_version}
except KeyError:
return None | python | def _get_rom_firmware_version(self, data):
"""Gets the rom firmware version for server capabilities
Parse the get_host_health_data() to retreive the firmware
details.
:param data: the output returned by get_host_health_data()
:returns: a dictionary of rom firmware version.
"""
firmware_details = self._get_firmware_embedded_health(data)
if firmware_details:
try:
rom_firmware_version = (
firmware_details['HP ProLiant System ROM'])
return {'rom_firmware_version': rom_firmware_version}
except KeyError:
return None | [
"def",
"_get_rom_firmware_version",
"(",
"self",
",",
"data",
")",
":",
"firmware_details",
"=",
"self",
".",
"_get_firmware_embedded_health",
"(",
"data",
")",
"if",
"firmware_details",
":",
"try",
":",
"rom_firmware_version",
"=",
"(",
"firmware_details",
"[",
"... | Gets the rom firmware version for server capabilities
Parse the get_host_health_data() to retreive the firmware
details.
:param data: the output returned by get_host_health_data()
:returns: a dictionary of rom firmware version. | [
"Gets",
"the",
"rom",
"firmware",
"version",
"for",
"server",
"capabilities"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L1006-L1023 | train | 41,697 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._get_number_of_gpu_devices_connected | def _get_number_of_gpu_devices_connected(self, data):
"""Gets the number of GPU devices connected to the server
Parse the get_host_health_data() and get the count of
number of GPU devices connected to the server.
:param data: the output returned by get_host_health_data()
:returns: a dictionary of rom firmware version.
"""
temp = self.get_value_as_list((data['GET_EMBEDDED_HEALTH_DATA']
['TEMPERATURE']), 'TEMP')
count = 0
if temp is None:
return {'pci_gpu_devices': count}
for key in temp:
for name, value in key.items():
if name == 'LABEL' and 'GPU' in value['VALUE']:
count = count + 1
return {'pci_gpu_devices': count} | python | def _get_number_of_gpu_devices_connected(self, data):
"""Gets the number of GPU devices connected to the server
Parse the get_host_health_data() and get the count of
number of GPU devices connected to the server.
:param data: the output returned by get_host_health_data()
:returns: a dictionary of rom firmware version.
"""
temp = self.get_value_as_list((data['GET_EMBEDDED_HEALTH_DATA']
['TEMPERATURE']), 'TEMP')
count = 0
if temp is None:
return {'pci_gpu_devices': count}
for key in temp:
for name, value in key.items():
if name == 'LABEL' and 'GPU' in value['VALUE']:
count = count + 1
return {'pci_gpu_devices': count} | [
"def",
"_get_number_of_gpu_devices_connected",
"(",
"self",
",",
"data",
")",
":",
"temp",
"=",
"self",
".",
"get_value_as_list",
"(",
"(",
"data",
"[",
"'GET_EMBEDDED_HEALTH_DATA'",
"]",
"[",
"'TEMPERATURE'",
"]",
")",
",",
"'TEMP'",
")",
"count",
"=",
"0",
... | Gets the number of GPU devices connected to the server
Parse the get_host_health_data() and get the count of
number of GPU devices connected to the server.
:param data: the output returned by get_host_health_data()
:returns: a dictionary of rom firmware version. | [
"Gets",
"the",
"number",
"of",
"GPU",
"devices",
"connected",
"to",
"the",
"server"
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L1058-L1079 | train | 41,698 |
openstack/proliantutils | proliantutils/ilo/ribcl.py | RIBCLOperations._get_firmware_update_xml_for_file_and_component | def _get_firmware_update_xml_for_file_and_component(
self, filename, component):
"""Creates the dynamic xml for flashing the device firmware via iLO.
This method creates the dynamic xml for flashing the firmware, based
on the component type so passed.
:param filename: location of the raw firmware file.
:param component_type: Type of component to be applied to.
:returns: the etree.Element for the root of the RIBCL XML
for flashing the device (component) firmware.
"""
if component == 'ilo':
cmd_name = 'UPDATE_RIB_FIRMWARE'
else:
# Note(deray): Not explicitly checking for all other supported
# devices (components), as those checks have already happened
# in the invoking methods and may seem redundant here.
cmd_name = 'UPDATE_FIRMWARE'
fwlen = os.path.getsize(filename)
root = self._create_dynamic_xml(cmd_name,
'RIB_INFO',
'write',
subelements={
'IMAGE_LOCATION': filename,
'IMAGE_LENGTH': str(fwlen)
})
return root | python | def _get_firmware_update_xml_for_file_and_component(
self, filename, component):
"""Creates the dynamic xml for flashing the device firmware via iLO.
This method creates the dynamic xml for flashing the firmware, based
on the component type so passed.
:param filename: location of the raw firmware file.
:param component_type: Type of component to be applied to.
:returns: the etree.Element for the root of the RIBCL XML
for flashing the device (component) firmware.
"""
if component == 'ilo':
cmd_name = 'UPDATE_RIB_FIRMWARE'
else:
# Note(deray): Not explicitly checking for all other supported
# devices (components), as those checks have already happened
# in the invoking methods and may seem redundant here.
cmd_name = 'UPDATE_FIRMWARE'
fwlen = os.path.getsize(filename)
root = self._create_dynamic_xml(cmd_name,
'RIB_INFO',
'write',
subelements={
'IMAGE_LOCATION': filename,
'IMAGE_LENGTH': str(fwlen)
})
return root | [
"def",
"_get_firmware_update_xml_for_file_and_component",
"(",
"self",
",",
"filename",
",",
"component",
")",
":",
"if",
"component",
"==",
"'ilo'",
":",
"cmd_name",
"=",
"'UPDATE_RIB_FIRMWARE'",
"else",
":",
"# Note(deray): Not explicitly checking for all other supported",
... | Creates the dynamic xml for flashing the device firmware via iLO.
This method creates the dynamic xml for flashing the firmware, based
on the component type so passed.
:param filename: location of the raw firmware file.
:param component_type: Type of component to be applied to.
:returns: the etree.Element for the root of the RIBCL XML
for flashing the device (component) firmware. | [
"Creates",
"the",
"dynamic",
"xml",
"for",
"flashing",
"the",
"device",
"firmware",
"via",
"iLO",
"."
] | 86ef3b47b4eca97c221577e3570b0240d6a25f22 | https://github.com/openstack/proliantutils/blob/86ef3b47b4eca97c221577e3570b0240d6a25f22/proliantutils/ilo/ribcl.py#L1128-L1155 | train | 41,699 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.