idx
int64
0
63k
question
stringlengths
53
5.28k
target
stringlengths
5
805
41,600
def _get_bios_boot_resource ( self , data ) : try : boot_uri = data [ 'links' ] [ 'Boot' ] [ 'href' ] except KeyError : msg = ( 'Boot resource not found.' ) raise exception . IloCommandNotSupportedError ( msg ) status , headers , boot_settings = self . _rest_get ( boot_uri ) if status != 200 : msg = self . _get_extended_error ( boot_settings ) raise exception . IloError ( msg ) return boot_settings
Get the Boot resource (e.g. BootSources).
41,601
def _get_bios_mappings_resource ( self , data ) : try : map_uri = data [ 'links' ] [ 'Mappings' ] [ 'href' ] except KeyError : msg = ( 'Mappings resource not found.' ) raise exception . IloCommandNotSupportedError ( msg ) status , headers , map_settings = self . _rest_get ( map_uri ) if status != 200 : msg = self . _get_extended_error ( map_settings ) raise exception . IloError ( msg ) return map_settings
Get the Mappings resource .
41,602
def _check_iscsi_rest_patch_allowed(self):
    """Check whether PATCH is supported on the iSCSI resource.

    :returns: the URI of the resource that accepts a PATCH of iSCSI
        settings (either the iScsi resource itself or its nested
        settings resource).
    :raises: IloCommandNotSupportedError if BIOS has no links/iScsi.
    :raises: IloError on a non-200 REST response.
    """
    headers, bios_uri, bios_settings = self._check_bios_resource()
    if ('links' in bios_settings and 'iScsi' in bios_settings['links']):
        iscsi_uri = bios_settings['links']['iScsi']['href']
        status, headers, settings = self._rest_get(iscsi_uri)
        if status != 200:
            msg = self._get_extended_error(settings)
            raise exception.IloError(msg)
        # If the iScsi resource itself does not allow PATCH, fall back to
        # its settings sub-resource and validate PATCH support there.
        if not self._operation_allowed(headers, 'PATCH'):
            headers, iscsi_uri, settings = (
                self._get_iscsi_settings_resource(settings))
            self._validate_if_patch_supported(headers, iscsi_uri)
        return iscsi_uri
    else:
        msg = ('"links/iScsi" section in bios'
               ' does not exist')
        raise exception.IloCommandNotSupportedError(msg)
Checks if patch is supported on iscsi .
41,603
def _change_iscsi_settings ( self , iscsi_info ) : headers , bios_uri , bios_settings = self . _check_bios_resource ( ) map_settings = self . _get_bios_mappings_resource ( bios_settings ) nics = [ ] for mapping in map_settings [ 'BiosPciSettingsMappings' ] : for subinstance in mapping [ 'Subinstances' ] : for association in subinstance [ 'Associations' ] : if 'NicBoot' in association : nics . append ( association ) if not nics : msg = ( 'No nics found' ) raise exception . IloError ( msg ) iscsi_uri = self . _check_iscsi_rest_patch_allowed ( ) iscsi_infos = [ ] for nic in nics : data = iscsi_info . copy ( ) data [ 'iSCSIBootAttemptName' ] = nic data [ 'iSCSINicSource' ] = nic data [ 'iSCSIBootAttemptInstance' ] = nics . index ( nic ) + 1 iscsi_infos . append ( data ) patch_data = { 'iSCSIBootSources' : iscsi_infos } status , headers , response = self . _rest_patch ( iscsi_uri , None , patch_data ) if status >= 300 : msg = self . _get_extended_error ( response ) raise exception . IloError ( msg )
Change iSCSI settings .
41,604
def _change_secure_boot_settings(self, property, value):
    """Change a secure-boot setting on the server.

    :param property: name of the SecureBoot property to set.
    :param value: value to set it to.
    :raises: IloCommandNotSupportedError if SecureBoot is not exposed.
    :raises: IloError when the PATCH fails.
    """
    system = self._get_host_details()
    if ('links' not in system['Oem']['Hp'] or
            'SecureBoot' not in system['Oem']['Hp']['links']):
        msg = (' "SecureBoot" resource or feature is not '
               'supported on this system')
        raise exception.IloCommandNotSupportedError(msg)
    secure_boot_uri = system['Oem']['Hp']['links']['SecureBoot']['href']
    new_secure_boot_settings = {}
    new_secure_boot_settings[property] = value
    status, headers, response = self._rest_patch(
        secure_boot_uri, None, new_secure_boot_settings)
    if status >= 300:
        msg = self._get_extended_error(response)
        raise exception.IloError(msg)
    # Toggle a trailing space on CustomPostMessage (strip it if present,
    # append one otherwise).  NOTE(review): this appears to be a deliberate
    # workaround to force a BIOS settings change so the secure-boot update
    # takes effect on reboot -- confirm before altering.
    val = self._get_bios_setting('CustomPostMessage')
    val = val.rstrip() if val.endswith(" ") else val + " "
    self._change_bios_setting({'CustomPostMessage': val})
Change secure boot settings on the server .
41,605
def clear_secure_boot_keys(self):
    """Reset all secure-boot keys on the server.

    :raises: IloCommandNotSupportedInBiosError when not in UEFI boot mode.
    """
    if not self._is_boot_mode_uefi():
        msg = ('System is not in UEFI boot mode. "SecureBoot" related '
               'resources cannot be changed.')
        raise exception.IloCommandNotSupportedInBiosError(msg)
    self._change_secure_boot_settings('ResetAllKeys', True)
Reset all keys .
41,606
def _perform_power_op ( self , oper ) : power_settings = { "Action" : "Reset" , "ResetType" : oper } systems_uri = "/rest/v1/Systems/1" status , headers , response = self . _rest_post ( systems_uri , None , power_settings ) if status >= 300 : msg = self . _get_extended_error ( response ) raise exception . IloError ( msg )
Perform requested power operation .
41,607
def _retry_until_powered_on ( self , power ) : status = self . get_host_power_status ( ) if ( status != power ) : self . _perform_power_op ( POWER_STATE [ power ] ) return self . get_host_power_status ( ) else : return status
This method retries power on operation .
41,608
def get_http_boot_url(self):
    """Request the HTTP boot URL from the system (UEFI boot mode only).

    :returns: the UefiShellStartupUrl BIOS setting.
    :raises: IloCommandNotSupportedInBiosError in legacy BIOS boot mode.
    """
    if self._is_boot_mode_uefi() is True:
        return self._get_bios_setting('UefiShellStartupUrl')
    msg = 'get_http_boot_url is not supported in the BIOS boot mode'
    raise exception.IloCommandNotSupportedInBiosError(msg)
Request the http boot url from system in uefi boot mode .
41,609
def set_http_boot_url(self, url):
    """Set the UefiShellStartupUrl BIOS setting (UEFI boot mode only).

    :param url: URL to set as the UEFI shell startup URL.
    :raises: IloCommandNotSupportedInBiosError in legacy BIOS boot mode.
    """
    if self._is_boot_mode_uefi() is True:
        self._change_bios_setting({'UefiShellStartupUrl': url})
        return
    msg = 'set_http_boot_url is not supported in the BIOS boot mode'
    raise exception.IloCommandNotSupportedInBiosError(msg)
Set url to the UefiShellStartupUrl to the system in uefi boot mode .
41,610
def _get_ilo_details ( self ) : manager_uri = '/rest/v1/Managers/1' status , headers , manager = self . _rest_get ( manager_uri ) if status != 200 : msg = self . _get_extended_error ( manager ) raise exception . IloError ( msg ) mtype = self . _get_type ( manager ) if ( mtype not in [ 'Manager.0' , 'Manager.1' ] ) : msg = "%s is not a valid Manager type " % mtype raise exception . IloError ( msg ) return manager , manager_uri
Gets iLO details
41,611
def reset_ilo(self):
    """Reset the iLO and wait for it to come back up.

    :raises: IloError when the Reset action fails.
    """
    manager, reset_uri = self._get_ilo_details()
    status, headers, response = self._rest_post(
        reset_uri, None, {'Action': 'Reset'})
    if status != 200:
        raise exception.IloError(self._get_extended_error(response))
    # Block until the iLO is reachable again after the reset.
    common.wait_for_ilo_after_reset(self)
Resets the iLO .
41,612
def _get_vm_device_status(self, device='FLOPPY'):
    """Return the given virtual-media device's resource and URI.

    :param device: 'FLOPPY' or 'CDROM'.
    :returns: (device resource dict, device URI).
    :raises: IloInvalidInputError for any other device name.
    :raises: IloCommandNotSupportedError when Manager has no VirtualMedia.
    :raises: IloError on REST failure or when the device is not present.
    """
    # Map the public device name to the MediaTypes value used by iLO.
    valid_devices = {'FLOPPY': 'floppy',
                     'CDROM': 'cd'}
    if device not in valid_devices:
        raise exception.IloInvalidInputError(
            "Invalid device. Valid devices: FLOPPY or CDROM.")
    manager, uri = self._get_ilo_details()
    try:
        vmedia_uri = manager['links']['VirtualMedia']['href']
    except KeyError:
        msg = ('"VirtualMedia" section in Manager/links does not exist')
        raise exception.IloCommandNotSupportedError(msg)
    # Walk the virtual-media collection until a member advertises the
    # requested media type (comparison is case-insensitive).
    for status, hds, vmed, memberuri in self._get_collection(vmedia_uri):
        status, headers, response = self._rest_get(memberuri)
        if status != 200:
            msg = self._get_extended_error(response)
            raise exception.IloError(msg)
        if (valid_devices[device] in
                [item.lower() for item in response['MediaTypes']]):
            vm_device_uri = response['links']['self']['href']
            return response, vm_device_uri
    msg = ('Virtualmedia device "' + device + '" is not'
           ' found on this system.')
    raise exception.IloError(msg)
Returns the given virtual media device status and device URI
41,613
def _get_persistent_boot_devices ( self ) : headers_bios , bios_uri , bios_settings = self . _check_bios_resource ( ) boot_settings = self . _get_bios_boot_resource ( bios_settings ) try : boot_sources = boot_settings [ 'BootSources' ] except KeyError : msg = ( "BootSources resource not found." ) raise exception . IloError ( msg ) try : boot_order = boot_settings [ 'PersistentBootConfigOrder' ] except KeyError : msg = ( "PersistentBootConfigOrder resource not found." ) raise exception . IloCommandNotSupportedError ( msg ) return boot_sources , boot_order
Get details of persistent boot devices and their order.
41,614
def _get_firmware_update_service_resource ( self ) : manager , uri = self . _get_ilo_details ( ) try : fw_uri = manager [ 'Oem' ] [ 'Hp' ] [ 'links' ] [ 'UpdateService' ] [ 'href' ] except KeyError : msg = ( "Firmware Update Service resource not found." ) raise exception . IloCommandNotSupportedError ( msg ) return fw_uri
Gets the firmware update service uri .
41,615
def _get_tpm_capability ( self ) : tpm_values = { "NotPresent" : False , "PresentDisabled" : True , "PresentEnabled" : True } try : tpm_state = self . _get_bios_setting ( 'TpmState' ) except exception . IloCommandNotSupportedError : tpm_state = "NotPresent" tpm_result = tpm_values [ tpm_state ] return tpm_result
Retrieves if server is TPM capable or not .
41,616
def _get_cpu_virtualization ( self ) : try : cpu_vt = self . _get_bios_setting ( 'ProcVirtualization' ) except exception . IloCommandNotSupportedError : return False if cpu_vt == 'Enabled' : vt_status = True else : vt_status = False return vt_status
Get CPU virtualization status.
41,617
def _get_nvdimm_n_status ( self ) : try : nvdimm_n_status = self . _get_bios_setting ( 'NvDimmNMemFunctionality' ) if nvdimm_n_status == 'Enabled' : nvn_status = True else : nvn_status = False except exception . IloCommandNotSupportedError : nvn_status = False return nvn_status
Get status of NVDIMM_N .
41,618
def _obsolete_plot_el_abund_marco(directory, name_h5_file, mass_range, cycle,
                                  logic_stable, i_decay, file_solar,
                                  solar_factor, symbol='ko'):
    """Plot element abundances averaged over mass_range (obsolete).

    NOTE(review): this function relies on module globals populated by
    ``average_iso_abund_marco`` and on helpers in ``u`` (nuutils); the
    exact nesting below is reconstructed from a flattened source line --
    verify against the original before reuse.
    """
    u.give_zip_element_z_and_names()
    u.solar(file_solar, solar_factor)
    # Populates the module-level average_mass_frac / average_mass_frac_decay.
    average_iso_abund_marco(mass_range, cycle, logic_stable, i_decay)
    mass_fractions_array_decayed = average_mass_frac_decay
    mass_fractions_array_not_decayed = average_mass_frac
    u.element_abund_marco(i_decay, stable, jjdum,
                          mass_fractions_array_not_decayed,
                          mass_fractions_array_decayed)
    fig = pl.figure()
    ax = fig.add_subplot(1, 1, 1)
    # Axis tick spacing: major every 10 Z, minor every 1 Z.
    xminorlocator = MultipleLocator(1)
    xmajorlocator = MultipleLocator(10)
    ax.xaxis.set_major_locator(xmajorlocator)
    ax.xaxis.set_minor_locator(xminorlocator)
    yminorlocator = MultipleLocator(0.1)
    ymajorlocator = MultipleLocator(1)
    ax.yaxis.set_major_locator(ymajorlocator)
    ax.yaxis.set_minor_locator(yminorlocator)
    ax.set_yscale('log')
    if not logic_stable:
        for i in range(u.z_bismuth):
            pl.plot(z_for_elem[i], elem_prod_fac[i], symbol, markersize=10.)
        pl.xlabel('$Atomic$ $number$', fontsize=20)
        pl.ylabel('$X_{i}/X_{sun}$', fontsize=20)
        pl.ylim(1.0e-2, 1000.)
        pl.xlim(0, 95)
    elif logic_stable:
        # NOTE(review): as written this first loop only `continue`s and has
        # no effect -- presumably dead code from an earlier revision.
        for i in range(u.z_bismuth):
            if index_stable[i] == 1:
                continue
        if i_decay == 2:
            for i in range(u.z_bismuth):
                if index_stable[i] == 1:
                    pl.plot(z_for_elem[i], elem_prod_fac_decayed[i], symbol,
                            markersize=10.)
            pl.xlabel('$Atomic$ $number$', fontsize=20)
            pl.ylabel('$X_{i}/X_{sun}$', fontsize=20)
            pl.ylim(1.0e-2, 1000.)
            pl.xlim(0, 95)
    pl.grid()
    pl.show()
Interface to plot elements abundances averaged over mass_range .
41,619
def get(self, cycle_list, dataitem=None, isotope=None, sparse=1):
    """Delegate to the underlying h5T ``get`` method.

    :param cycle_list: cycle, or list of cycles, to read.
    :param dataitem: name of the data item to fetch (optional).
    :param isotope: isotope selector (optional).
    :param sparse: read every ``sparse``-th entry.
    :returns: whatever ``self.se.get`` returns.
    """
    return self.se.get(cycle_list, dataitem, isotope, sparse)
Simple function that simply calls h5T . py get method . There are three ways to call this function .
41,620
def get_elemental_abunds(self, cycle, index=None):
    """Return elemental abundances for one cycle.

    :param cycle: cycle to read.
    :param index: None for every zone, an int for a single zone, or a
        two-element list [start, stop) of zone indices.
    :returns: per-zone list of elemental abundances (or a single list
        when ``index`` is an int), one entry per unique Z, ascending.
    """
    isoabunds = self.se.get(cycle, 'iso_massf')
    Z = array(self.se.Z)
    # Unique proton numbers, ascending; one output element per Z.
    Zuq = list(set(Z))
    Zuq.sort()
    if index is None:  # fixed: was "index == None"
        index = [0, len(isoabunds)]
    if isinstance(index, list):  # fixed: was "type(index) == list"
        elemabunds = []
        for zone in range(index[0], index[1]):
            percent = int((zone - index[0]) * 100. / (index[1] - index[0]))
            sys.stdout.flush()
            sys.stdout.write("\rgetting elemental abundances " +
                             "...%d%%" % percent)
            # Sum the isotopic mass fractions of every isotope with this Z.
            elemabunds.append([sum(isoabunds[zone][where(Z == iZ)])
                               for iZ in Zuq])
    else:
        elemabunds = [sum(isoabunds[index][where(Z == iZ)]) for iZ in Zuq]
    return elemabunds
returns the elemental abundances for one cycle either for the whole star or a specific zone depending upon the value of index .
41,621
def plot_prof_1(self, mod, species, xlim1, xlim2, ylim1, ylim2, symbol=None):
    """Plot one species for a cycle between xlim1 and xlim2.

    Thin wrapper that forwards to ``DataPlot.plot_prof_1``.  Note the
    argument order: DataPlot expects ``species`` before ``mod``.
    """
    DataPlot.plot_prof_1(self, species, mod, xlim1, xlim2,
                         ylim1, ylim2, symbol)
plot one species for cycle between xlim1 and xlim2
41,622
def plot_prof_2(self, mod, species, xlim1, xlim2):
    """Plot one species' mass-fraction profile for a cycle.

    :param mod: cycle to plot.
    :param species: species name to fetch from 'yps'.
    :param xlim1: lower mass-coordinate limit.
    :param xlim2: upper mass-coordinate limit.
    """
    mass = self.se.get(mod, 'mass')
    abundance = self.se.get(mod, 'yps', species)
    pyl.plot(mass, abundance, '-', label=str(mod) + ', ' + species)
    pyl.xlim(xlim1, xlim2)
    pyl.legend()
Plot one species for cycle between xlim1 and xlim2
41,623
def plot_prof_sparse(self, mod, species, xlim1, xlim2, ylim1, ylim2,
                     sparse, symbol):
    """Plot one species (log10 mass fraction) for a cycle, subsampled.

    :param sparse: plot every ``sparse``-th point.
    :param symbol: matplotlib plot symbol.
    """
    mass = self.se.get(mod, 'mass')
    Xspecies = self.se.get(mod, 'yps', species)
    # Subsample both arrays with the same stride before plotting.
    pyl.plot(mass[::sparse], np.log10(Xspecies[::sparse]), symbol)
    pyl.xlim(xlim1, xlim2)
    pyl.ylim(ylim1, ylim2)
    pyl.legend()
plot one species for cycle between xlim1 and xlim2 .
41,624
def trajectory(self, ini, end, delta, mass_coo, age_in_sec=False,
               online=False):
    """Create a trajectory (T, rho, r vs age) at a fixed mass coordinate.

    :param ini: first cycle.
    :param end: last cycle (inclusive).
    :param delta: cycle stride.
    :param mass_coo: mass coordinate to interpolate at.
    :param age_in_sec: if True, divide ages by constants.one_year.
        NOTE(review): the flag name suggests seconds but the code divides
        by one_year -- confirm intended units.
    :param online: if True, return a FileLink to the written file instead
        of the data lists.
    :returns: (radius, density, temperature, age) lists, or a FileLink.
    """
    filename = 'traj_' + str(mass_coo) + '.dat'
    # Opened in append mode: repeated calls accumulate into the same file.
    f = open(filename, 'a')
    radius_at_mass_coo = []
    density_at_mass_coo = []
    temperature_at_mass_coo = []
    masses = self.se.get(list(range(ini, end + 1, delta)), 'mass')
    temps = self.se.get(list(range(ini, end + 1, delta)), 'temperature')
    rhos = self.se.get(list(range(ini, end + 1, delta)), 'rho')
    radii = self.se.get(list(range(ini, end + 1, delta)), 'radius')
    ages = self.se.get(list(range(ini, end + 1, delta)), 'age')
    cycs = list(range(ini, end + 1, delta))
    age_all = []
    for i in range(len(ages)):
        age = ages[i]
        if age_in_sec:
            age /= constants.one_year
        mass = masses[i]
        temperature = temps[i]
        rho = rhos[i]
        radius = radii[i]
        my_things = [temperature, rho, radius]
        # Locate the two zones bracketing mass_coo; the branch handles
        # both ascending and descending mass grids.
        if mass[0] > mass[len(mass) - 1]:
            zone_above = where(mass > mass_coo)[0][-1]
            zone_below = zone_above + 1
        else:
            zone_above = where(mass > mass_coo)[0][0]
            zone_below = zone_above - 1
        if mass[zone_below] > mass[zone_above]:
            sys.exit("ERROR: finding of zone index confused")
        # Linear interpolation of each quantity at mass_coo.
        all_things_interplt = []
        for thing in my_things:
            thing_interplt = thing[zone_below] + (
                mass_coo - mass[zone_below]) * (
                thing[zone_above] - thing[zone_below]) / (
                mass[zone_above] - mass[zone_below])
            all_things_interplt.append(thing_interplt)
        this_temperature, this_rho, this_radius = all_things_interplt
        string = str(cycs[i]) + ' ' + str(age) + ' ' + str(
            this_temperature) + ' ' + str(this_rho)
        f.write(string + "\n")
        radius_at_mass_coo.append(this_radius)
        density_at_mass_coo.append(this_rho)
        temperature_at_mass_coo.append(this_temperature)
        age_all.append(age)
    f.close()
    if online:
        return FileLink(filename)
    return (radius_at_mass_coo, density_at_mass_coo,
            temperature_at_mass_coo, age_all)
create a trajectory out of a stellar model
41,625
def abup_se_plot(mod, species):
    """Plot C-12 from one ABUPP file against the se file.

    NOTE(review): two apparent defects, left untouched pending
    confirmation of intent:
    - ``species`` is immediately overwritten with 'C-12', so the
      parameter is ignored;
    - ``self`` is referenced although this function takes no ``self``
      parameter, so calling it as written raises NameError.
    """
    species = 'C-12'
    filename = 'ABUPP%07d0000.DAT' % mod
    print(filename)
    # Columns 1 and 18 of the ABUPP file: mass coordinate and C-12.
    mass, c12 = np.loadtxt(filename, skiprows=4, usecols=[1, 18],
                           unpack=True)
    c12_se = self.se.get(mod, 'iso_massf', 'C-12')
    mass_se = self.se.get(mod, 'mass')
    pyl.plot(mass, c12)
    pyl.plot(mass_se, c12_se, 'o', label='cycle ' + str(mod))
    pyl.legend()
plot species from one ABUPP file and the se file .
41,626
def decay(self, mass_frac):
    """Calculate isotope abundances after decay, per zone.

    Fills the module-level global ``decayed_multi_d`` with one list of
    decayed abundances per zone in ``mass_frac``.

    NOTE(review): the inner sum reads ``self.mass_frac`` rather than the
    ``mass_frac`` parameter -- the two are presumably the same object at
    every call site; confirm before relying on the parameter.
    """
    import nuutils as u
    global decayed_multi_d
    decayed_multi_d = []
    for iii in range(len(mass_frac)):
        jj = -1
        decayed = []
        for i in range(len(u.decay_raw)):
            # jdum flags which decay chains are active.
            if u.jdum[i] > 0.5:
                jj = jj + 1
                dummy = 0.
                # Sum the mass fractions of every isotope feeding this
                # stable product; missing isotopes are silently skipped.
                for j in range(len(u.decay_raw[i])):
                    try:
                        dum_str = u.decay_raw[i][j]
                        dummy = dummy + float(
                            self.mass_frac[iii][
                                u.cl[dum_str.lower().capitalize()]])
                    except KeyError:
                        None
                    except IndexError:
                        None
                decayed.append(dummy)
        decayed_multi_d.append(decayed)
This module simply calculates abundances of isotopes after decay.
41,627
def windyields(self, ini, end, delta, **keyw):
    """Return the wind yields X_i and ejected masses E_i.

    :param ini: first cycle.
    :param end: last cycle (inclusive).
    :param delta: cycle stride.
    :param keyw: optional overrides for the dataset names 'tmass',
        'abund' and 'cycle'.
    :returns: (X_i, E_i) as computed by ``self._windcalc``.
    """
    # Default dataset names unless overridden by the caller.
    if ("tmass" in keyw) == False:
        keyw["tmass"] = "mass"
    if ("abund" in keyw) == False:
        keyw["abund"] = "iso_massf"
    if ("cycle" in keyw) == False:
        keyw["cycle"] = "cycle"
    print("Windyields() initialised. Reading files...")
    ypsinit = []
    niso = 0
    X_i = []
    E_i = []
    totalmass = []
    ypssurf = []
    cycles = []
    first = True
    # Hoist bound methods to locals -- avoids repeated attribute lookups
    # in the read loop below.
    wc = self._windcalc
    cycleret = self.se.cycles
    retrieve = self.se.get
    capp = cycles.extend
    tapp = totalmass.extend
    yapp = ypssurf.extend
    for i in range(ini, end + 1, delta):
        step = int(i)
        capp([int(cycleret[i - ini])])
        tapp([retrieve(step, keyw["tmass"])])
        yapp([retrieve(step, keyw["abund"])])
    print("Reading complete. Calculating yields and ejected masses...")
    nsteps = len(cycles) - 1
    niso = len(ypssurf[0])
    X_i = np.zeros([niso], float)
    E_i = np.zeros([niso], float)
    # Delegate the actual yield calculation to the helper.
    X_i, E_i = wc(first, totalmass, nsteps, niso, ypssurf,
                  ypsinit, X_i, E_i, cycles)
    return X_i, E_i
This function returns the wind yields and ejected masses .
41,628
def average_iso_abund_marco(self, mass_range, cycle, stable, i_decay):
    """Average isotopic abundances over mass_range for one cycle.

    Fills the module-level globals ``average_mass_frac`` and (when
    ``i_decay == 2``) ``average_mass_frac_decay``.

    NOTE(review): relies on the module-level ``used_masses`` and
    ``decayed_multi_d`` populated by ``_read_iso_abund_marco`` /
    ``decay`` -- call order matters.
    """
    import nuutils as u
    # Decayed abundances only make sense together with the stable flag.
    if not stable and i_decay == 2:
        print('ERROR: choose i_decay = 1')
        return
    self._read_iso_abund_marco(mass_range, cycle)
    if i_decay == 2:
        u.stable_specie()
        self.decay(self.mass_frac)
    print('average over used_masses range, not over original mass_range')
    print(used_masses[0], used_masses[len(used_masses) - 1],
          'instead of', mass_range[0], mass_range[1])
    global average_mass_frac
    average_mass_frac = []
    if len(used_masses) >= 2:
        # Mass-weighted average: each zone contributes dm_i / dm_tot.
        dm_tot = abs(used_masses[len(used_masses) - 1] - used_masses[0])
        for j in range(len(u.spe) - 1):
            temp = 0.
            for i in range(len(used_masses) - 1):
                dm_i = abs(used_masses[i + 1] - used_masses[i])
                temp = float(self.mass_frac[i][j] * dm_i / dm_tot) + temp
            average_mass_frac.append(temp)
    elif len(used_masses) == 1:
        print('case with 1 mass zone only, not implemented yet')
    somma = 0.
    somma = sum(average_mass_frac)
    # Sanity check: mass fractions should sum to ~1.
    print('departure from 1 of sum of average_mass_frac=', abs(1. - somma))
    if i_decay == 2:
        global average_mass_frac_decay
        average_mass_frac_decay = []
        dm_tot = abs(used_masses[len(used_masses) - 1] - used_masses[0])
        for j in range(len(u.back_ind)):
            temp = 0.
            for i in range(len(used_masses) - 1):
                dm_i = abs(used_masses[i + 1] - used_masses[i])
                temp = float(decayed_multi_d[i][j] * dm_i / dm_tot) + temp
            average_mass_frac_decay.append(temp)
        somma = 0.
        somma = sum(average_mass_frac_decay)
        print('departure from 1 of sum of average_mass_frac_decay=',
              abs(1. - somma))
Interface to average over mass_range .
41,629
def _get_elem_names(self):
    """Build the element-name -> Z mapping for this instance.

    Populates ``self.z_of_element_name`` from nuutils'
    ``index_z_for_elements`` after registering this instance's element
    names.
    """
    import nuutils as u
    u.give_zip_element_z_and_names(self.elements_names)
    self.z_of_element_name = u.index_z_for_elements
returns for one cycle an element name dictionary .
41,630
def get_abundance_iso_decay(self, cycle):
    """Compute decayed stable isotopes and production factors for a cycle.

    Populates several instance attributes (index dictionaries, decayed
    abundances and production factors).  NOTE(review): depends on
    nuutils globals and on the hard-coded solar abundance file
    'iniab1.0E-02.ppn_GN93' with solar_factor = 2 -- confirm these are
    the intended defaults.
    """
    import nuutils as u
    masses_for_this_cycle = self.se.get(cycle, 'mass')
    # Read isotopic abundances over the full mass range of this cycle.
    self._read_iso_abund_marco(
        [min(masses_for_this_cycle), max(masses_for_this_cycle)], cycle)
    u.stable_specie()
    self.decay(self.mass_frac)
    self.index_for_all_species = u.cl
    self.index_for_stable_species = u.back_ind
    self.decayed_stable_isotopes_per_cycle = decayed_multi_d
    solar_factor = 2.
    u.solar('iniab1.0E-02.ppn_GN93', solar_factor)
    self.stable_isotope_identifier = u.jjdum
    self.stable_isotope_list = u.stable
    # Production factors (X_i / X_sun) before decay, per zone.
    self.isotopic_production_factors = []
    for i in range(len(masses_for_this_cycle)):
        pf_dum = []
        jj = 0
        for j in range(len(self.stable_isotope_identifier)):
            if self.stable_isotope_identifier[j] == 1:
                pf_dum.append(float(old_div(
                    self.mass_frac[i][self.index_for_all_species[
                        self.stable_isotope_list[jj].capitalize()]],
                    u.solar_abundance[
                        self.stable_isotope_list[jj].lower()])))
                jj = jj + 1
        self.isotopic_production_factors.append(pf_dum)
    # Production factors after decay, per zone.
    self.isotopic_production_factors_decayed = []
    for i in range(len(masses_for_this_cycle)):
        pf_dum_d = []
        jj = 0
        for j in range(len(self.stable_isotope_identifier)):
            if self.stable_isotope_identifier[j] == 1:
                pf_dum_d.append(float(old_div(
                    self.decayed_stable_isotopes_per_cycle[i][
                        self.index_for_stable_species[
                            self.stable_isotope_list[jj].upper()]],
                    u.solar_abundance[
                        self.stable_isotope_list[jj].lower()])))
                jj = jj + 1
        self.isotopic_production_factors_decayed.append(pf_dum_d)
returns the decayed stable isotopes .
41,631
def maximum_size_bytes(self):
    """Return the capacity of the biggest disk drive, in bytes.

    Devices without a CapacityBytes attribute are ignored.
    """
    capacities = [device.get('CapacityBytes')
                  for device in self.devices
                  if device.get('CapacityBytes') is not None]
    return utils.max_safe(capacities)
Gets the biggest disk drive
41,632
def push_power_button(self, target_value):
    """Push the power button (HPE-exclusive Reset action).

    :param target_value: one of the keys of
        PUSH_POWER_BUTTON_VALUE_MAP_REV.
    :raises: InvalidInputError for any other value.
    """
    try:
        value = mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV[target_value]
    except KeyError:
        msg = ('The parameter "%(parameter)s" value "%(target_value)s" is '
               'invalid. Valid values are: %(valid_power_values)s' %
               {'parameter': 'target_value',
                'target_value': target_value,
                'valid_power_values': (
                    mappings.PUSH_POWER_BUTTON_VALUE_MAP_REV.keys())})
        raise exception.InvalidInputError(msg)
    target_uri = (
        self._get_hpe_push_power_button_action_element().target_uri)
    self._conn.post(target_uri, data={'PushType': value})
Reset the system in hpe exclusive manner .
41,633
def bios_settings(self):
    """Reference to the BIOSSettings instance for this system."""
    path = utils.get_subresource_path_by(self, 'Bios')
    return bios.BIOSSettings(self._conn, path,
                             redfish_version=self.redfish_version)
Property to provide reference to BIOSSettings instance
41,634
def secure_boot(self):
    """Reference to the SecureBoot instance for this system."""
    path = utils.get_subresource_path_by(self, 'SecureBoot')
    return secure_boot.SecureBoot(self._conn, path,
                                  redfish_version=self.redfish_version)
Property to provide reference to SecureBoot instance
41,635
def ethernet_interfaces(self):
    """Reference to the EthernetInterfaceCollection for this system."""
    path = self._get_hpe_sub_resource_collection_path('EthernetInterfaces')
    return ethernet_interface.EthernetInterfaceCollection(
        self._conn, path, redfish_version=self.redfish_version)
Provide reference to EthernetInterfacesCollection instance
41,636
def smart_storage(self):
    """Reference to the HPESmartStorage object for this system."""
    path = utils.get_subresource_path_by(
        self, ['Oem', 'Hpe', 'Links', 'SmartStorage'])
    return hpe_smart_storage.HPESmartStorage(
        self._conn, path, redfish_version=self.redfish_version)
This property gets the object for smart storage .
41,637
def storages(self):
    """Reference to the StorageCollection for this system."""
    path = utils.get_subresource_path_by(self, 'Storage')
    return storage.StorageCollection(self._conn, path,
                                     redfish_version=self.redfish_version)
This property gets the list of instances for Storages
41,638
def simple_storages(self):
    """Reference to the SimpleStorageCollection for this system."""
    path = utils.get_subresource_path_by(self, 'SimpleStorage')
    return simple_storage.SimpleStorageCollection(
        self._conn, path, redfish_version=self.redfish_version)
This property gets the list of instances for SimpleStorages
41,639
def memory(self):
    """Reference to the MemoryCollection for this system."""
    path = utils.get_subresource_path_by(self, 'Memory')
    return memory.MemoryCollection(self._conn, path,
                                   redfish_version=self.redfish_version)
Property to provide reference to MemoryCollection instance
41,640
def get_smart_storage_config(self, smart_storage_config_url):
    """Return a HPESmartStorageConfig instance for one controller.

    :param smart_storage_config_url: URL of the SmartStorageConfig
        resource.
    """
    return (smart_storage_config.HPESmartStorageConfig(
        self._conn, smart_storage_config_url,
        redfish_version=self.redfish_version))
Returns a SmartStorageConfig Instance for each controller .
41,641
def _get_smart_storage_config_by_controller_model ( self , controller_model ) : ac = self . smart_storage . array_controllers . array_controller_by_model ( controller_model ) if ac : for ssc_id in self . smart_storage_config_identities : ssc_obj = self . get_smart_storage_config ( ssc_id ) if ac . location == ssc_obj . location : return ssc_obj
Returns a SmartStorageConfig Instance for controller by model .
41,642
def check_smart_storage_config_ids(self):
    """Verify SmartStorageConfig controller ids were retrieved.

    :raises: IloError when no controller configurations are available.
    """
    if self.smart_storage_config_identities is not None:
        return
    msg = ('The Redfish controller failed to get the '
           'SmartStorageConfig controller configurations.')
    LOG.debug(msg)
    raise exception.IloError(msg)
Check SmartStorageConfig controllers is there in hardware .
41,643
def delete_raid(self):
    """Delete the RAID configuration on every controller.

    :raises: IloError when one or more controllers fail with a Sushy
        error.
    :raises: IloLogicalDriveNotFoundError when no controller had any
        logical drives to delete.
    """
    self.check_smart_storage_config_ids()
    any_exceptions = []
    # Count controllers that simply had nothing to delete.
    ld_exc_count = 0
    for config_id in self.smart_storage_config_identities:
        try:
            ssc_obj = self.get_smart_storage_config(config_id)
            ssc_obj.delete_raid()
        except exception.IloLogicalDriveNotFoundError as e:
            ld_exc_count += 1
        except sushy.exceptions.SushyError as e:
            any_exceptions.append((config_id, str(e)))
    if any_exceptions:
        msg = ('The Redfish controller failed to delete the '
               'raid configuration in one or more controllers with '
               'Error: %(error)s' % {'error': str(any_exceptions)})
        raise exception.IloError(msg)
    # Only an error when EVERY controller was empty.
    if ld_exc_count == len(self.smart_storage_config_identities):
        msg = ('No logical drives are found in any controllers. Nothing '
               'to delete.')
        raise exception.IloLogicalDriveNotFoundError(msg)
Delete the raid configuration on the hardware .
41,644
def _parse_raid_config_data ( self , raid_config ) : default = ( self . smart_storage . array_controllers . get_default_controller . model ) controllers = { default : [ ] } for ld in raid_config [ 'logical_disks' ] : if 'controller' not in ld . keys ( ) : controllers [ default ] . append ( ld ) else : ctrl = ld [ 'controller' ] if ctrl not in controllers : controllers [ ctrl ] = [ ] controllers [ ctrl ] . append ( ld ) return controllers
It will parse raid config data based on raid controllers
41,645
def _post_create_read_raid(self, raid_config):
    """Read back the logical drives after a create-raid operation.

    :param raid_config: the raid configuration that was requested.
    :returns: dict with a 'logical_disks' list aggregated across all
        involved controllers.
    :raises: IloLogicalDriveNotFoundError when no controller has any
        logical drives.
    :raises: IloError when one or more controllers fail with a Sushy
        error.
    """
    controllers = self._parse_raid_config_data(raid_config)
    ld_exc_count = 0
    any_exceptions = []
    config = {'logical_disks': []}
    for controller in controllers:
        try:
            ssc_obj = (
                self._get_smart_storage_config_by_controller_model(
                    controller))
            if ssc_obj:
                result = ssc_obj.read_raid(controller=controller)
                config['logical_disks'].extend(result['logical_disks'])
        except exception.IloLogicalDriveNotFoundError as e:
            # This controller simply has no logical drives.
            ld_exc_count += 1
        except sushy.exceptions.SushyError as e:
            any_exceptions.append((controller, str(e)))
    # Only an error when EVERY controller was empty.
    if ld_exc_count == len(controllers):
        msg = 'No logical drives are found in any controllers.'
        raise exception.IloLogicalDriveNotFoundError(msg)
    if any_exceptions:
        msg = ('The Redfish controller failed to read the '
               'raid configuration in one or more controllers with '
               'Error: %(error)s' % {'error': str(any_exceptions)})
        raise exception.IloError(msg)
    return config
Read the logical drives from the system after post - create raid
41,646
def _post_delete_read_raid ( self ) : any_exceptions = [ ] ssc_ids = self . smart_storage_config_identities config = { 'logical_disks' : [ ] } for ssc_id in ssc_ids : try : ssc_obj = self . get_smart_storage_config ( ssc_id ) ac_obj = ( self . smart_storage . array_controllers . array_controller_by_location ( ssc_obj . location ) ) if ac_obj : model = ac_obj . model result = ssc_obj . read_raid ( ) if result : config [ 'logical_disks' ] . extend ( result [ 'logical_disks' ] ) except sushy . exceptions . SushyError as e : any_exceptions . append ( ( model , str ( e ) ) ) if any_exceptions : msg = ( 'The Redfish controller failed to read the ' 'raid configuration in one or more controllers with ' 'Error: %(error)s' % { 'error' : str ( any_exceptions ) } ) raise exception . IloError ( msg ) return config
Read the logical drives from the system after post - delete raid
41,647
def _convert_json_to_entity(entry_element, property_resolver):
    """Convert a JSON table response to an Entity.

    :param entry_element: dict parsed from the service's JSON payload.
    :param property_resolver: optional callable
        (partition_key, row_key, name, value, edm_type) -> edm_type used
        to override inferred property types.
    :returns: the populated Entity.
    :raises: AzureException on an unsupported EDM type, or when a
        property_resolver-selected conversion fails.
    """
    entity = Entity()
    properties = {}
    edmtypes = {}
    odata = {}
    # Split the payload into odata metadata, per-property EDM type
    # annotations ("name@odata.type"), and the property values themselves.
    for name, value in entry_element.items():
        if name.startswith('odata.'):
            odata[name[6:]] = value
        elif name.endswith('@odata.type'):
            edmtypes[name[:-11]] = value
        else:
            properties[name] = value
    # System properties are promoted onto the entity directly.
    partition_key = properties.pop('PartitionKey', None)
    if partition_key:
        entity['PartitionKey'] = partition_key
    row_key = properties.pop('RowKey', None)
    if row_key:
        entity['RowKey'] = row_key
    timestamp = properties.pop('Timestamp', None)
    if timestamp:
        entity['Timestamp'] = _from_entity_datetime(timestamp)
    for name, value in properties.items():
        mtype = edmtypes.get(name)
        # Give the caller's resolver the chance to pick the EDM type.
        if property_resolver:
            mtype = property_resolver(partition_key, row_key, name,
                                      value, mtype)
            if mtype and mtype not in _EDM_TYPES:
                raise AzureException(_ERROR_TYPE_NOT_SUPPORTED.format(mtype))
        # Untyped JSON integers deserialize as INT32.
        if type(value) is int:
            mtype = EdmType.INT32
        if not mtype:
            entity[name] = value
        else:
            conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)
            if conv is not None:
                try:
                    property = conv(value)
                except Exception as e:
                    if property_resolver:
                        raise AzureException(
                            _ERROR_INVALID_PROPERTY_RESOLVER.format(
                                name, value, mtype))
                    else:
                        raise e
            else:
                property = EntityProperty(mtype, value)
            entity[name] = property
    # Fix: only synthesize an etag from the Timestamp when the service did
    # not return one -- the original overwrote a real odata etag whenever
    # a Timestamp was present.
    etag = odata.get('etag')
    if timestamp and not etag:
        etag = 'W/"datetime\'' + url_quote(timestamp) + '\'"'
    entity['etag'] = etag
    return entity
Convert json response to entity .
41,648
def _extract_etag ( response ) : if response and response . headers : for name , value in response . headers : if name . lower ( ) == 'etag' : return value return None
Extracts the etag from the response headers .
41,649
def generate_blob(self, container_name, blob_name, permission=None,
                  expiry=None, start=None, id=None, ip=None, protocol=None,
                  cache_control=None, content_disposition=None,
                  content_encoding=None, content_language=None,
                  content_type=None):
    """Generate a shared access signature for a blob.

    Use the returned token as the sas_token parameter of any BlobService.

    :returns: the SAS token string.
    """
    resource_path = container_name + '/' + blob_name
    helper = _SharedAccessHelper()
    helper.add_base(permission, expiry, start, ip, protocol)
    helper.add_id(id)
    # 'b' marks the signed resource as a single blob.
    helper.add_resource('b')
    helper.add_override_response_headers(cache_control, content_disposition,
                                         content_encoding, content_language,
                                         content_type)
    helper.add_resource_signature(self.account_name, self.account_key,
                                  'blob', resource_path)
    return helper.get_token()
Generates a shared access signature for the blob . Use the returned signature with the sas_token parameter of any BlobService .
41,650
def create_table(self, table_name, fail_on_exist=False, timeout=None):
    """Create a new table in the storage account.

    :param table_name: name of the table to create.
    :param fail_on_exist: when True, let an existing-table error
        propagate; when False, swallow it and return False.
    :param timeout: optional server-side timeout in seconds.
    :returns: True when the table was created, False when it already
        existed and fail_on_exist is False.
    """
    _validate_not_none('table', table_name)
    request = HTTPRequest()
    request.method = 'POST'
    request.host = self._get_host()
    request.path = '/Tables'
    request.query = [('timeout', _int_to_str(timeout))]
    request.headers = [_DEFAULT_CONTENT_TYPE_HEADER,
                       _DEFAULT_PREFER_HEADER,
                       _DEFAULT_ACCEPT_HEADER]
    request.body = _get_request_body(_convert_table_to_json(table_name))
    if fail_on_exist:
        self._perform_request(request)
        return True
    try:
        self._perform_request(request)
        return True
    except AzureHttpError as ex:
        # Re-raises unless the error is "already exists".
        _dont_fail_on_exist(ex)
        return False
Creates a new table in the storage account .
41,651
def exists ( self , table_name , timeout = None ) : _validate_not_none ( 'table_name' , table_name ) request = HTTPRequest ( ) request . method = 'GET' request . host = self . _get_host ( ) request . path = '/Tables' + "('" + table_name + "')" request . headers = [ ( 'Accept' , TablePayloadFormat . JSON_NO_METADATA ) ] request . query = [ ( 'timeout' , _int_to_str ( timeout ) ) ] try : self . _perform_request ( request ) return True except AzureHttpError as ex : _dont_fail_not_exist ( ex ) return False
Returns a boolean indicating whether the table exists .
41,652
def query_entities ( self , table_name , filter = None , select = None , num_results = None , marker = None , accept = TablePayloadFormat . JSON_MINIMAL_METADATA , property_resolver = None , timeout = None ) : args = ( table_name , ) kwargs = { 'filter' : filter , 'select' : select , 'max_results' : num_results , 'marker' : marker , 'accept' : accept , 'property_resolver' : property_resolver , 'timeout' : timeout } resp = self . _query_entities ( * args , ** kwargs ) return ListGenerator ( resp , self . _query_entities , args , kwargs )
Returns a generator to list the entities in the table specified . The generator will lazily follow the continuation tokens returned by the service and stop when all entities have been returned or max_results is reached .
41,653
def get_request_partition_key ( self , request ) : if request . method == 'POST' : doc = ETree . fromstring ( request . body ) part_key = doc . find ( './atom:content/m:properties/d:PartitionKey' , _etree_entity_feed_namespaces ) if part_key is None : raise AzureBatchValidationError ( _ERROR_CANNOT_FIND_PARTITION_KEY ) return _get_etree_text ( part_key ) else : uri = url_unquote ( request . path ) pos1 = uri . find ( 'PartitionKey=\'' ) pos2 = uri . find ( '\',' , pos1 ) if pos1 == - 1 or pos2 == - 1 : raise AzureBatchValidationError ( _ERROR_CANNOT_FIND_PARTITION_KEY ) return uri [ pos1 + len ( 'PartitionKey=\'' ) : pos2 ]
Extracts PartitionKey from request . body if it is a POST request or from request . path if it is not a POST request . Only insert operation request is a POST request and the PartitionKey is in the request body .
41,654
def get_request_row_key ( self , request ) : if request . method == 'POST' : doc = ETree . fromstring ( request . body ) row_key = doc . find ( './atom:content/m:properties/d:RowKey' , _etree_entity_feed_namespaces ) if row_key is None : raise AzureBatchValidationError ( _ERROR_CANNOT_FIND_ROW_KEY ) return _get_etree_text ( row_key ) else : uri = url_unquote ( request . path ) pos1 = uri . find ( 'RowKey=\'' ) pos2 = uri . find ( '\')' , pos1 ) if pos1 == - 1 or pos2 == - 1 : raise AzureBatchValidationError ( _ERROR_CANNOT_FIND_ROW_KEY ) row_key = uri [ pos1 + len ( 'RowKey=\'' ) : pos2 ] return row_key
Extracts RowKey from request . body if it is a POST request or from request . path if it is not a POST request . Only insert operation request is a POST request and the Rowkey is in the request body .
41,655
def validate_request_table ( self , request ) : if self . batch_table : if self . get_request_table ( request ) != self . batch_table : raise AzureBatchValidationError ( _ERROR_INCORRECT_TABLE_IN_BATCH ) else : self . batch_table = self . get_request_table ( request )
Validates that all requests have the same table name . Set the table name if it is the first request for the batch operation .
41,656
def validate_request_partition_key ( self , request ) : if self . batch_partition_key : if self . get_request_partition_key ( request ) != self . batch_partition_key : raise AzureBatchValidationError ( _ERROR_INCORRECT_PARTITION_KEY_IN_BATCH ) else : self . batch_partition_key = self . get_request_partition_key ( request )
Validates that all requests have the same PartitiionKey . Set the PartitionKey if it is the first request for the batch operation .
41,657
def validate_request_row_key ( self , request ) : if self . batch_row_keys : if self . get_request_row_key ( request ) in self . batch_row_keys : raise AzureBatchValidationError ( _ERROR_DUPLICATE_ROW_KEY_IN_BATCH ) else : self . batch_row_keys . append ( self . get_request_row_key ( request ) )
Validates that all requests have the different RowKey and adds RowKey to existing RowKey list .
41,658
def begin_batch ( self ) : self . is_batch = True self . batch_table = '' self . batch_partition_key = '' self . batch_row_keys = [ ] self . batch_requests = [ ]
Starts the batch operation . Intializes the batch variables
41,659
def insert_request_to_batch ( self , request ) : self . validate_request_table ( request ) self . validate_request_partition_key ( request ) self . validate_request_row_key ( request ) self . batch_requests . append ( request )
Adds request to batch operation .
41,660
def commit_batch_requests ( self ) : batch_boundary = b'batch_' + _new_boundary ( ) changeset_boundary = b'changeset_' + _new_boundary ( ) if self . batch_requests : request = HTTPRequest ( ) request . method = 'POST' request . host = self . batch_requests [ 0 ] . host request . path = '/$batch' request . headers = [ ( 'Content-Type' , 'multipart/mixed; boundary=' + batch_boundary . decode ( 'utf-8' ) ) , ( 'Accept' , 'application/atom+xml,application/xml' ) , ( 'Accept-Charset' , 'UTF-8' ) ] request . body = b'--' + batch_boundary + b'\n' request . body += b'Content-Type: multipart/mixed; boundary=' request . body += changeset_boundary + b'\n\n' content_id = 1 for batch_request in self . batch_requests : request . body += b'--' + changeset_boundary + b'\n' request . body += b'Content-Type: application/http\n' request . body += b'Content-Transfer-Encoding: binary\n\n' request . body += batch_request . method . encode ( 'utf-8' ) request . body += b' http://' request . body += batch_request . host . encode ( 'utf-8' ) request . body += batch_request . path . encode ( 'utf-8' ) request . body += b' HTTP/1.1\n' request . body += b'Content-ID: ' request . body += str ( content_id ) . encode ( 'utf-8' ) + b'\n' content_id += 1 if not batch_request . method == 'DELETE' : request . body += b'Content-Type: application/atom+xml;type=entry\n' for name , value in batch_request . headers : if name == 'If-Match' : request . body += name . encode ( 'utf-8' ) + b': ' request . body += value . encode ( 'utf-8' ) + b'\n' break request . body += b'Content-Length: ' request . body += str ( len ( batch_request . body ) ) . encode ( 'utf-8' ) request . body += b'\n\n' request . body += batch_request . body + b'\n' else : for name , value in batch_request . headers : if name == 'If-Match' : request . body += name . encode ( 'utf-8' ) + b': ' request . body += value . encode ( 'utf-8' ) + b'\n\n' break else : request . body += b'If-Match: *\n\n' request . 
body += b'--' + changeset_boundary + b'--' + b'\n' request . body += b'--' + batch_boundary + b'--' request . path , request . query = _update_request_uri_query ( request ) request . headers = _update_storage_table_header ( request ) self . authentication . sign_request ( request ) response = self . perform_request ( request ) if response . status >= 300 : raise HTTPError ( response . status , _ERROR_BATCH_COMMIT_FAIL , self . respheader , response . body ) responses = self . _parse_batch_response ( response . body ) if responses and responses [ 0 ] . status >= 300 : self . _report_batch_error ( responses [ 0 ] )
Commits the batch requests .
41,661
def set_license ( self , key ) : data = { 'LicenseKey' : key } license_service_uri = ( utils . get_subresource_path_by ( self , [ 'Oem' , 'Hpe' , 'Links' , 'LicenseService' ] ) ) self . _conn . post ( license_service_uri , data = data )
Set the license on a redfish system
41,662
def virtual_media ( self ) : return virtual_media . VirtualMediaCollection ( self . _conn , utils . get_subresource_path_by ( self , 'VirtualMedia' ) , redfish_version = self . redfish_version )
Property to provide reference to VirtualMediaCollection instance .
41,663
def writeTraj ( filename = 'trajectory.input' , data = [ ] , ageunit = 0 , tunit = 0 , rhounit = 0 , idNum = 0 ) : if data == [ ] : print ( 'Please input correct data' ) print ( 'returning None' ) return None headers = [ ] if ageunit == 1 : headers . append ( 'AGEUNIT = SEC' ) elif ageunit == 0 : headers . append ( 'AGEUNIT = YRS' ) elif ageunit == 2 : headers . append ( 'AGEUNIT = logtimerev/yrs' ) if tunit == 1 : headers . append ( 'TUNIT = T9K' ) elif tunit == 0 : headers . append ( 'TUNIT = T8K' ) if rhounit == 1 : headers . append ( 'RHOUNIT = LOG' ) elif rhounit == 0 : headers . append ( 'RHOUNIT = CGS' ) headers . append ( 'ID = ' + str ( idNum ) ) write ( filename , headers , [ 'time' , 'T' , 'rho' ] , data , [ 'YRS/SEC; T8K/T9K; CGS/LOG' , "FORMAT: '(10x,A3)'" ] , trajectory = True )
Method for writeing Trajectory type ascii files files .
41,664
def get ( self , attri ) : isCol = False isHead = False if attri in self . dcols : isCol = True elif attri in self . hattrs : isHead = True else : print ( "That attribute does not exist in this File" ) print ( 'Returning None' ) if isCol : return self . getColData ( attri ) elif isHead : return hattrs
Method that dynamically determines the type of attribute that is passed into this method . Also it then returns that attribute s associated data .
41,665
def initial_finall_mass_relation ( self , marker = 'o' , linestyle = '--' ) : final_m = [ ] ini_m = [ ] for i in range ( len ( self . runs_H5_surf ) ) : sefiles = se ( self . runs_H5_out [ i ] ) ini_m . append ( sefiles . get ( "mini" ) ) h1 = sefiles . get ( int ( sefiles . se . cycles [ - 2 ] ) , 'H-1' ) mass = sefiles . get ( int ( sefiles . se . cycles [ - 2 ] ) , 'mass' ) idx = - 1 for k in range ( len ( h1 ) ) : if h1 [ k ] > 0.1 : idx = k break final_m . append ( mass [ idx ] ) label = 'Z=' + str ( sefiles . get ( 'zini' ) ) plt . plot ( ini_m , final_m , label = label , marker = marker , linestyle = linestyle ) plt . xlabel ( '$M_{Initial} [M_{\odot}]$' , size = 23 ) plt . ylabel ( '$M_{Final} [M_{\odot}]$' , size = 23 )
INtiial to final mass relation
41,666
def set_cores_massive ( self , filename = 'core_masses_massive.txt' ) : core_info = [ ] minis = [ ] for i in range ( len ( self . runs_H5_surf ) ) : sefiles = se ( self . runs_H5_out [ i ] ) mini = sefiles . get ( 'mini' ) minis . append ( mini ) incycle = int ( sefiles . se . cycles [ - 1 ] ) core_info . append ( sefiles . cores ( incycle = incycle ) ) print_info = '' for i in range ( len ( self . runs_H5_surf ) ) : if i == 0 : print 'Following returned for each initial mass' print core_info [ i ] [ 1 ] print_info += ( str ( minis [ i ] ) + ' & ' ) info = core_info [ i ] [ 0 ] for k in range ( len ( info ) ) : print_info += ( '{:.3E}' . format ( float ( core_info [ i ] [ 0 ] [ k ] ) ) + ' & ' ) print_info = ( print_info + '\n' ) f1 = open ( filename , 'a' ) f1 . write ( print_info ) f1 . close ( )
Uesse function cores in nugridse . py
41,667
def _get_request_body_bytes_only ( param_name , param_value ) : if param_value is None : return b'' if isinstance ( param_value , bytes ) : return param_value raise TypeError ( _ERROR_VALUE_SHOULD_BE_BYTES . format ( param_name ) )
Validates the request body passed in and converts it to bytes if our policy allows it .
41,668
def _get_attribute_value_of ( resource , attribute_name , default = None ) : try : return getattr ( resource , attribute_name ) except ( sushy . exceptions . SushyError , exception . MissingAttributeError ) as e : msg = ( ( 'The Redfish controller failed to get the ' 'attribute %(attribute)s from resource %(resource)s. ' 'Error %(error)s' ) % { 'error' : str ( e ) , 'attribute' : attribute_name , 'resource' : resource . __class__ . __name__ } ) LOG . debug ( msg ) return default
Gets the value of attribute_name from the resource
41,669
def get_local_gb ( system_obj ) : local_max_bytes = 0 logical_max_mib = 0 volume_max_bytes = 0 physical_max_mib = 0 drives_max_bytes = 0 simple_max_bytes = 0 smart_resource = _get_attribute_value_of ( system_obj , 'smart_storage' ) storage_resource = _get_attribute_value_of ( system_obj , 'storages' ) if smart_resource is not None : logical_max_mib = _get_attribute_value_of ( smart_resource , 'logical_drives_maximum_size_mib' , default = 0 ) if storage_resource is not None : volume_max_bytes = _get_attribute_value_of ( storage_resource , 'volumes_maximum_size_bytes' , default = 0 ) local_max_bytes = utils . max_safe ( [ ( logical_max_mib * 1024 * 1024 ) , volume_max_bytes ] ) if local_max_bytes == 0 : if smart_resource is not None : physical_max_mib = _get_attribute_value_of ( smart_resource , 'physical_drives_maximum_size_mib' , default = 0 ) if storage_resource is not None : drives_max_bytes = _get_attribute_value_of ( storage_resource , 'drives_maximum_size_bytes' , default = 0 ) simple_resource = _get_attribute_value_of ( system_obj , 'simple_storages' ) if simple_resource is not None : simple_max_bytes = _get_attribute_value_of ( simple_resource , 'maximum_size_bytes' , default = 0 ) local_max_bytes = utils . max_safe ( [ ( physical_max_mib * 1024 * 1024 ) , drives_max_bytes , simple_max_bytes ] ) local_gb = 0 if local_max_bytes > 0 : local_gb = int ( local_max_bytes / ( 1024 * 1024 * 1024 ) ) - 1 else : msg = ( 'The maximum size for the hard disk or logical ' 'volume could not be determined.' ) LOG . debug ( msg ) return local_gb
Gets the largest volume or the largest disk
41,670
def has_ssd ( system_obj ) : smart_value = False storage_value = False smart_resource = _get_attribute_value_of ( system_obj , 'smart_storage' ) if smart_resource is not None : smart_value = _get_attribute_value_of ( smart_resource , 'has_ssd' , default = False ) if smart_value : return smart_value storage_resource = _get_attribute_value_of ( system_obj , 'storages' ) if storage_resource is not None : storage_value = _get_attribute_value_of ( storage_resource , 'has_ssd' , default = False ) return storage_value
Gets if the system has any drive as SSD drive
41,671
def has_nvme_ssd ( system_obj ) : storage_value = False storage_resource = _get_attribute_value_of ( system_obj , 'storages' ) if storage_resource is not None : storage_value = _get_attribute_value_of ( storage_resource , 'has_nvme_ssd' , default = False ) return storage_value
Gets if the system has any drive as NVMe SSD drive
41,672
def get_drive_rotational_speed_rpm ( system_obj ) : speed = set ( ) smart_resource = _get_attribute_value_of ( system_obj , 'smart_storage' ) if smart_resource is not None : speed . update ( _get_attribute_value_of ( smart_resource , 'drive_rotational_speed_rpm' , default = set ( ) ) ) storage_resource = _get_attribute_value_of ( system_obj , 'storages' ) if storage_resource is not None : speed . update ( _get_attribute_value_of ( storage_resource , 'drive_rotational_speed_rpm' , default = set ( ) ) ) return speed
Gets the set of rotational speed rpms of the disks .
41,673
def create_configuration ( self , node , ports ) : target_raid_config = node . get ( 'target_raid_config' , { } ) . copy ( ) return hpssa_manager . create_configuration ( raid_config = target_raid_config )
Create RAID configuration on the bare metal .
41,674
def erase_devices ( self , node , port ) : result = { } result [ 'Disk Erase Status' ] = hpssa_manager . erase_devices ( ) result . update ( super ( ProliantHardwareManager , self ) . erase_devices ( node , port ) ) return result
Erase the drives on the bare metal .
41,675
def init_model_based_tags ( self , model ) : self . model = model if 'G7' in self . model : self . MEMORY_SIZE_TAG = "MEMORY_SIZE" self . MEMORY_SIZE_NOT_PRESENT_TAG = "Not Installed" self . NIC_INFORMATION_TAG = "NIC_INFOMATION" else : self . MEMORY_SIZE_TAG = "TOTAL_MEMORY_SIZE" self . MEMORY_SIZE_NOT_PRESENT_TAG = "N/A" self . NIC_INFORMATION_TAG = "NIC_INFORMATION"
Initializing the model based memory and NIC information tags .
41,676
def _request_ilo ( self , root , extra_headers = None ) : if self . port : urlstr = 'https://%s:%d/ribcl' % ( self . host , self . port ) else : urlstr = 'https://%s/ribcl' % ( self . host ) xml = self . _serialize_xml ( root ) headers = { "Content-length" : str ( len ( xml ) ) } if extra_headers : headers . update ( extra_headers ) kwargs = { 'headers' : headers , 'data' : xml } if self . cacert is not None : kwargs [ 'verify' ] = self . cacert else : kwargs [ 'verify' ] = False try : LOG . debug ( self . _ ( "POST %(url)s with request data: " "%(request_data)s" ) , { 'url' : urlstr , 'request_data' : MaskedRequestData ( kwargs ) } ) response = requests . post ( urlstr , ** kwargs ) response . raise_for_status ( ) except Exception as e : LOG . debug ( self . _ ( "Unable to connect to iLO. %s" ) , e ) raise exception . IloConnectionError ( e ) return response . text
Send RIBCL XML data to iLO .
41,677
def _create_dynamic_xml ( self , cmdname , tag_name , mode , subelements = None ) : root = etree . Element ( 'RIBCL' , VERSION = "2.0" ) login = etree . SubElement ( root , 'LOGIN' , USER_LOGIN = self . login , PASSWORD = self . password ) tagname = etree . SubElement ( login , tag_name , MODE = mode ) subelements = subelements or { } etree . SubElement ( tagname , cmdname ) if six . PY2 : root_iterator = root . getiterator ( cmdname ) else : root_iterator = root . iter ( cmdname ) for cmd in root_iterator : for key , value in subelements . items ( ) : cmd . set ( key , value ) return root
Create RIBCL XML to send to iLO .
41,678
def _serialize_xml ( self , root ) : if hasattr ( etree , 'tostringlist' ) : if six . PY3 : xml_content_list = [ x . decode ( "utf-8" ) for x in etree . tostringlist ( root ) ] else : xml_content_list = etree . tostringlist ( root ) xml = '\r\n' . join ( xml_content_list ) + '\r\n' else : if six . PY3 : xml_content = etree . tostring ( root ) . decode ( "utf-8" ) else : xml_content = etree . tostring ( root ) xml = xml_content + '\r\n' return xml
Serialize XML data into string
41,679
def _parse_output ( self , xml_response ) : count = 0 xml_dict = { } resp_message = None xml_start_pos = [ ] for m in re . finditer ( r"\<\?xml" , xml_response ) : xml_start_pos . append ( m . start ( ) ) while count < len ( xml_start_pos ) : if ( count == len ( xml_start_pos ) - 1 ) : result = xml_response [ xml_start_pos [ count ] : ] else : start = xml_start_pos [ count ] end = xml_start_pos [ count + 1 ] result = xml_response [ start : end ] result = result . strip ( ) message = etree . fromstring ( result ) resp = self . _validate_message ( message ) if hasattr ( resp , 'tag' ) : xml_dict = self . _elementtree_to_dict ( resp ) elif resp is not None : resp_message = resp count = count + 1 if xml_dict : return xml_dict elif resp_message is not None : return resp_message
Parse the response XML from iLO .
41,680
def _elementtree_to_dict ( self , element ) : node = { } text = getattr ( element , 'text' ) if text is not None : text = text . strip ( ) if len ( text ) != 0 : node [ 'text' ] = text node . update ( element . items ( ) ) child_nodes = { } for child in element : child_nodes . setdefault ( child . tag , [ ] ) . append ( self . _elementtree_to_dict ( child ) ) for key , value in child_nodes . items ( ) : if len ( value ) == 1 : child_nodes [ key ] = value [ 0 ] node . update ( child_nodes . items ( ) ) return node
Convert XML elementtree to dictionary .
41,681
def _validate_message ( self , message ) : if message . tag != 'RIBCL' : status = - 1 raise exception . IloClientInternalError ( message , status ) for child in message : if child . tag != 'RESPONSE' : return message status = int ( child . get ( 'STATUS' ) , 16 ) msg = child . get ( 'MESSAGE' ) if status == 0 and msg != 'No error' : return msg if status != 0 : if 'syntax error' in msg or 'Feature not supported' in msg : for cmd in BOOT_MODE_CMDS : if cmd in msg : platform = self . get_product_name ( ) msg = ( "%(cmd)s is not supported on %(platform)s" % { 'cmd' : cmd , 'platform' : platform } ) LOG . debug ( self . _ ( "Got invalid response with " "message: '%(message)s'" ) , { 'message' : msg } ) raise ( exception . IloCommandNotSupportedError ( msg , status ) ) else : LOG . debug ( self . _ ( "Got invalid response with " "message: '%(message)s'" ) , { 'message' : msg } ) raise exception . IloClientInternalError ( msg , status ) if ( status in exception . IloLoginFailError . statuses or msg in exception . IloLoginFailError . messages ) : LOG . debug ( self . _ ( "Got invalid response with " "message: '%(message)s'" ) , { 'message' : msg } ) raise exception . IloLoginFailError ( msg , status ) LOG . debug ( self . _ ( "Got invalid response with " "message: '%(message)s'" ) , { 'message' : msg } ) raise exception . IloError ( msg , status )
Validate XML response from iLO .
41,682
def _execute_command ( self , create_command , tag_info , mode , dic = { } ) : xml = self . _create_dynamic_xml ( create_command , tag_info , mode , dic ) d = self . _request_ilo ( xml ) data = self . _parse_output ( d ) LOG . debug ( self . _ ( "Received response data: %s" ) , data ) return data
Execute a command on the iLO .
41,683
def get_all_licenses ( self ) : data = self . _execute_command ( 'GET_ALL_LICENSES' , 'RIB_INFO' , 'read' ) d = { } for key , val in data [ 'GET_ALL_LICENSES' ] [ 'LICENSE' ] . items ( ) : if isinstance ( val , dict ) : d [ key ] = data [ 'GET_ALL_LICENSES' ] [ 'LICENSE' ] [ key ] [ 'VALUE' ] return d
Retrieve license type key installation date etc .
41,684
def set_pending_boot_mode ( self , value ) : dic = { 'value' : value } data = self . _execute_command ( 'SET_PENDING_BOOT_MODE' , 'SERVER_INFO' , 'write' , dic ) return data
Configures the boot mode of the system from a specific boot mode .
41,685
def get_persistent_boot_device ( self ) : result = self . _get_persistent_boot ( ) boot_mode = self . _check_boot_mode ( result ) if boot_mode == 'bios' : return result [ 0 ] [ 'value' ] value = result [ 0 ] [ 'DESCRIPTION' ] if 'HP iLO Virtual USB CD' in value : return 'CDROM' elif 'NIC' in value or 'PXE' in value : return 'NETWORK' elif common . isDisk ( value ) : return 'HDD' else : return None
Get the current persistent boot device set for the host .
41,686
def _set_persistent_boot ( self , values = [ ] ) : xml = self . _create_dynamic_xml ( 'SET_PERSISTENT_BOOT' , 'SERVER_INFO' , 'write' ) if six . PY2 : child_iterator = xml . getiterator ( ) else : child_iterator = xml . iter ( ) for child in child_iterator : for val in values : if child . tag == 'SET_PERSISTENT_BOOT' : etree . SubElement ( child , 'DEVICE' , VALUE = val ) d = self . _request_ilo ( xml ) data = self . _parse_output ( d ) return data
Configures a boot from a specific device .
41,687
def _request_host ( self ) : urlstr = 'https://%s/xmldata?item=all' % ( self . host ) kwargs = { } if self . cacert is not None : kwargs [ 'verify' ] = self . cacert else : kwargs [ 'verify' ] = False try : response = requests . get ( urlstr , ** kwargs ) response . raise_for_status ( ) except Exception as e : raise IloConnectionError ( e ) return response . text
Request host info from the server .
41,688
def get_host_uuid ( self ) : xml = self . _request_host ( ) root = etree . fromstring ( xml ) data = self . _elementtree_to_dict ( root ) return data [ 'HSI' ] [ 'SPN' ] [ 'text' ] , data [ 'HSI' ] [ 'cUUID' ] [ 'text' ]
Request host UUID of the server .
41,689
def get_host_health_data ( self , data = None ) : if not data or data and "GET_EMBEDDED_HEALTH_DATA" not in data : data = self . _execute_command ( 'GET_EMBEDDED_HEALTH' , 'SERVER_INFO' , 'read' ) return data
Request host health data of the server .
41,690
def get_host_health_power_supplies ( self , data = None ) : data = self . get_host_health_data ( data ) d = ( data [ 'GET_EMBEDDED_HEALTH_DATA' ] [ 'POWER_SUPPLIES' ] [ 'SUPPLY' ] ) if not isinstance ( d , list ) : d = [ d ] return d
Request the health power supply information .
41,691
def get_host_health_temperature_sensors ( self , data = None ) : data = self . get_host_health_data ( data ) d = data [ 'GET_EMBEDDED_HEALTH_DATA' ] [ 'TEMPERATURE' ] [ 'TEMP' ] if not isinstance ( d , list ) : d = [ d ] return d
Get the health Temp Sensor report .
41,692
def get_host_health_fan_sensors ( self , data = None ) : data = self . get_host_health_data ( data ) d = data [ 'GET_EMBEDDED_HEALTH_DATA' ] [ 'FANS' ] [ 'FAN' ] if not isinstance ( d , list ) : d = [ d ] return d
Get the health Fan Sensor Report .
41,693
def get_essential_properties ( self ) : data = self . get_host_health_data ( ) properties = { 'memory_mb' : self . _parse_memory_embedded_health ( data ) } cpus , cpu_arch = self . _parse_processor_embedded_health ( data ) properties [ 'cpus' ] = cpus properties [ 'cpu_arch' ] = cpu_arch properties [ 'local_gb' ] = self . _parse_storage_embedded_health ( data ) macs = self . _parse_nics_embedded_health ( data ) return_value = { 'properties' : properties , 'macs' : macs } return return_value
Gets essential scheduling properties as required by ironic
41,694
def _parse_storage_embedded_health ( self , data ) : local_gb = 0 storage = self . get_value_as_list ( data [ 'GET_EMBEDDED_HEALTH_DATA' ] , 'STORAGE' ) if storage is None : return local_gb minimum = local_gb for item in storage : cntlr = self . get_value_as_list ( item , 'CONTROLLER' ) if cntlr is None : continue for s in cntlr : drive = self . get_value_as_list ( s , 'LOGICAL_DRIVE' ) if drive is None : continue for item in drive : for key , val in item . items ( ) : if key == 'CAPACITY' : capacity = val [ 'VALUE' ] local_bytes = ( strutils . string_to_bytes ( capacity . replace ( ' ' , '' ) , return_int = True ) ) local_gb = int ( local_bytes / ( 1024 * 1024 * 1024 ) ) if minimum >= local_gb or minimum == 0 : minimum = local_gb if minimum : minimum = minimum - 1 return minimum
Gets the storage data from get_embedded_health
41,695
def get_value_as_list ( self , dictionary , key ) : if key not in dictionary : return None value = dictionary [ key ] if not isinstance ( value , list ) : return [ value ] else : return value
Helper function to check and convert a value to list .
41,696
def _parse_nics_embedded_health ( self , data ) : nic_data = self . get_value_as_list ( ( data [ 'GET_EMBEDDED_HEALTH_DATA' ] [ self . NIC_INFORMATION_TAG ] ) , 'NIC' ) if nic_data is None : msg = "Unable to get NIC details. Data missing" raise exception . IloError ( msg ) nic_dict = { } for item in nic_data : try : port = item [ 'NETWORK_PORT' ] [ 'VALUE' ] mac = item [ 'MAC_ADDRESS' ] [ 'VALUE' ] self . _update_nic_data_from_nic_info_based_on_model ( nic_dict , item , port , mac ) except KeyError : msg = "Unable to get NIC details. Data missing" raise exception . IloError ( msg ) return nic_dict
Gets the NIC details from get_embedded_health data
41,697
def _get_rom_firmware_version ( self , data ) : firmware_details = self . _get_firmware_embedded_health ( data ) if firmware_details : try : rom_firmware_version = ( firmware_details [ 'HP ProLiant System ROM' ] ) return { 'rom_firmware_version' : rom_firmware_version } except KeyError : return None
Gets the rom firmware version for server capabilities
41,698
def _get_number_of_gpu_devices_connected ( self , data ) : temp = self . get_value_as_list ( ( data [ 'GET_EMBEDDED_HEALTH_DATA' ] [ 'TEMPERATURE' ] ) , 'TEMP' ) count = 0 if temp is None : return { 'pci_gpu_devices' : count } for key in temp : for name , value in key . items ( ) : if name == 'LABEL' and 'GPU' in value [ 'VALUE' ] : count = count + 1 return { 'pci_gpu_devices' : count }
Gets the number of GPU devices connected to the server
41,699
def _get_firmware_update_xml_for_file_and_component ( self , filename , component ) : if component == 'ilo' : cmd_name = 'UPDATE_RIB_FIRMWARE' else : cmd_name = 'UPDATE_FIRMWARE' fwlen = os . path . getsize ( filename ) root = self . _create_dynamic_xml ( cmd_name , 'RIB_INFO' , 'write' , subelements = { 'IMAGE_LOCATION' : filename , 'IMAGE_LENGTH' : str ( fwlen ) } ) return root
Creates the dynamic xml for flashing the device firmware via iLO .