signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def show(destination, protocol=None, **kwargs):  # pylint: disable=unused-argument
    '''Display all details for a certain route learned via a specific protocol.
    If the protocol is not specified, all matching routes are returned.

    .. note::

        This function returns the routes from the RIB. In case the destination
        prefix is too short, there may be too many routes matched; for devices
        with a very high number of routes, request using a longer prefix.

    destination
        destination prefix.

    protocol (optional)
        protocol used to learn the routes to the destination.

    .. versionchanged:: 2017.7.0

    CLI Example:

    .. code-block:: bash

        salt 'my_router' route.show 172.16.0.0/25
        salt 'my_router' route.show 172.16.0.0/25 bgp
    '''
    # Delegate to the proxy-aware NAPALM helper; `napalm_device` is injected
    # into the module's globals by the salt loader.
    route_query = {'destination': destination, 'protocol': protocol}
    return salt.utils.napalm.call(
        napalm_device,  # pylint: disable=undefined-variable
        'get_route_to',
        **route_query
    )
|
def _job_done_callback(self, submissionid, task, result, grade, problems, tests, custom, state, archive, stdout, stderr, newsub=True):
    """Callback called by Client when a job is done. Updates the submission in
    the database with the data returned after the completion of the job.

    :param submissionid: id of the submission that finished grading
    :param task: task object the submission belongs to
    :param result: (status, text) tuple returned by the grader
    :param grade: grade returned by the grader
    :param problems: per-problem feedback
    :param tests: test results
    :param custom: grader-defined custom data
    :param state: grading state blob
    :param archive: optional archive bytes (stored in GridFS) or None
    :param stdout: captured stdout of the job
    :param stderr: captured stderr of the job
    :param newsub: whether this is a new submission (forwarded to hooks/stats)
    """
    submission = self.get_submission(submissionid, False)
    submission = self.get_input_from_submission(submission)
    data = {
        # "error" only if error was made by INGInious (anything not success/failed)
        "status": ("done" if result[0] == "success" or result[0] == "failed" else "error"),
        "result": result[0],
        "grade": grade,
        "text": result[1],
        "tests": tests,
        "problems": problems,
        # Archives go into GridFS; keep None when the job produced none.
        "archive": (self._gridfs.put(archive) if archive is not None else None),
        "custom": custom,
        "state": state,
        "stdout": stdout,
        "stderr": stderr
    }
    # Clear the transient job/SSH-debug fields now that the job is finished.
    unset_obj = {"jobid": "", "ssh_host": "", "ssh_port": "", "ssh_password": ""}
    # Save submission to database
    submission = self._database.submissions.find_one_and_update(
        {"_id": submission["_id"]},
        {"$set": data, "$unset": unset_obj},
        return_document=ReturnDocument.AFTER)
    self._hook_manager.call_hook("submission_done", submission=submission, archive=archive, newsub=newsub)
    for username in submission["username"]:
        self._user_manager.update_user_stats(username, task, submission, result[0], grade, state, newsub)
    # LTI outcome passback only when the submission carries LTI launch data.
    if "outcome_service_url" in submission and "outcome_result_id" in submission and "outcome_consumer_key" in submission:
        for username in submission["username"]:
            self._lti_outcome_manager.add(username,
                                          submission["courseid"],
                                          submission["taskid"],
                                          submission["outcome_consumer_key"],
                                          submission["outcome_service_url"],
                                          submission["outcome_result_id"])
|
def get_one_ping_per_client(pings):
    """Return an RDD keeping a single ping for each client.

    THIS METHOD IS NOT RECOMMENDED: The ping to be returned is essentially
    selected at random. It is also expensive as it requires data to be
    shuffled around. It should be run only after extracting a subset with
    get_pings_properties.

    :param pings: RDD of pings — raw JSON bytes or already-parsed dicts
    :raises ValueError: if no ping carries a clientID/clientId field
    """
    # Pings may still be raw JSON; decode lazily over the whole RDD.
    if isinstance(pings.first(), bytes):
        pings = pings.map(lambda p: json.loads(p.decode('utf-8')))
    filtered = pings.filter(lambda p: "clientID" in p or "clientId" in p)
    # Fixed: `if not filtered:` never fired — RDDs define no truth value,
    # so the object was always truthy. isEmpty() performs the real check.
    if filtered.isEmpty():
        raise ValueError("Missing clientID/clientId attribute.")
    if "clientID" in filtered.first():
        client_id = "clientID"
        # v2
    else:
        client_id = "clientId"
        # v4
    # Keep an arbitrary ping per client id (reduceByKey keeps the first seen).
    return filtered.map(lambda p: (p[client_id], p)).reduceByKey(lambda p1, p2: p1).map(lambda p: p[1])
|
def visit_Call_35(self, call):
    """Visit `ast.Call` nodes using the Python 3.5+ call grammar."""
    new_func, func_expl = self.visit(call.func)
    arg_expls = []
    new_args = []
    new_kwargs = []
    for positional in call.args:
        rewritten, sub_expl = self.visit(positional)
        new_args.append(rewritten)
        arg_expls.append(sub_expl)
    for kw in call.keywords:
        rewritten, sub_expl = self.visit(kw.value)
        new_kwargs.append(ast.keyword(kw.arg, rewritten))
        # A `**kwargs` expansion appears as a keyword whose .arg is None.
        arg_expls.append(kw.arg + "=" + sub_expl if kw.arg else "**" + sub_expl)
    expl = "%s(%s)" % (func_expl, ", ".join(arg_expls))
    new_call = ast.Call(new_func, new_args, new_kwargs)
    res = self.assign(new_call)
    res_expl = self.explanation_param(self.display(res))
    outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
    return res, outer_expl
|
def get_headers_global():
    """Define the so-called global column headings for Arbin .res-files.

    Returns:
        dict: maps internal header keys (``*_txt``) to the Arbin column names.
    """
    # Global column headings (specific for Arbin).  Entries marked
    # "KEEP FOR CELLPY FILE FORMAT" are part of the cellpy file format.
    return {
        "applications_path_txt": 'Applications_Path',
        "channel_index_txt": 'Channel_Index',
        "channel_number_txt": 'Channel_Number',
        "channel_type_txt": 'Channel_Type',
        "comments_txt": 'Comments',
        "creator_txt": 'Creator',
        "daq_index_txt": 'DAQ_Index',
        "item_id_txt": 'Item_ID',
        "log_aux_data_flag_txt": 'Log_Aux_Data_Flag',
        "log_chanstat_data_flag_txt": 'Log_ChanStat_Data_Flag',
        "log_event_data_flag_txt": 'Log_Event_Data_Flag',
        "log_smart_battery_data_flag_txt": 'Log_Smart_Battery_Data_Flag',
        "mapped_aux_conc_cnumber_txt": 'Mapped_Aux_Conc_CNumber',
        "mapped_aux_di_cnumber_txt": 'Mapped_Aux_DI_CNumber',
        "mapped_aux_do_cnumber_txt": 'Mapped_Aux_DO_CNumber',
        "mapped_aux_flow_rate_cnumber_txt": 'Mapped_Aux_Flow_Rate_CNumber',
        "mapped_aux_ph_number_txt": 'Mapped_Aux_PH_Number',
        "mapped_aux_pressure_number_txt": 'Mapped_Aux_Pressure_Number',
        "mapped_aux_temperature_number_txt": 'Mapped_Aux_Temperature_Number',
        "mapped_aux_voltage_number_txt": 'Mapped_Aux_Voltage_Number',
        "schedule_file_name_txt": 'Schedule_File_Name',
        "start_datetime_txt": 'Start_DateTime',  # KEEP FOR CELLPY FILE FORMAT
        "test_id_txt": 'Test_ID',  # KEEP FOR CELLPY FILE FORMAT
        "test_name_txt": 'Test_Name',  # KEEP FOR CELLPY FILE FORMAT
    }
|
def ProcessHuntFlowDone(flow_obj, status_msg=None):
    """Notifies hunt about a given hunt-induced flow completion.

    :param flow_obj: the hunt-induced flow that has just completed
    :param status_msg: optional final status message, forwarded to
        SaveResourceUsage for resource accounting
    """
    # Non-legacy (relational) hunts only need limit/expiration bookkeeping.
    if not hunt.IsLegacyHunt(flow_obj.parent_hunt_id):
        hunt_obj = hunt.StopHuntIfCPUOrNetworkLimitsExceeded(flow_obj.parent_hunt_id)
        hunt.CompleteHuntIfExpirationTimeReached(hunt_obj)
        return
    hunt_urn = rdfvalue.RDFURN("hunts").Add(flow_obj.parent_hunt_id)
    client_urn = rdf_client.ClientURN(flow_obj.client_id)
    # Update the counter metrics separately from collections to minimize
    # contention.
    with aff4.FACTORY.Open(hunt_urn, mode="rw") as fd:
        # Legacy AFF4 code expects token to be set.
        fd.token = access_control.ACLToken(username=fd.creator)
        if flow_obj.num_replies_sent:
            fd.context.clients_with_results_count += 1
        fd.context.completed_clients_count += 1
        fd.context.results_count += flow_obj.num_replies_sent
        fd.GetRunner().SaveResourceUsage(flow_obj.client_id, status_msg)
    with aff4.FACTORY.Open(hunt_urn, mode="rw") as fd:
        # Legacy AFF4 code expects token to be set.
        fd.token = access_control.ACLToken(username=fd.creator)
        fd.RegisterCompletedClient(client_urn)
        if flow_obj.num_replies_sent:
            fd.RegisterClientWithResults(client_urn)
        fd.StopHuntIfAverageLimitsExceeded()
|
def count(self):
    """Count the number of distinct results of the wrapped query.

    @return: an L{int} representing the number of distinct results.
    """
    # checkpoint() presumably flushes pending writes so the raw SQL below
    # sees current data — TODO confirm against store semantics.
    if not self.query.store.autocommit:
        self.query.store.checkpoint()
    # One storeID column per table taking part in the query.
    target = ', '.join([tableClass.storeID.getColumnName(self.query.store) for tableClass in self.query.tableClass])
    sql, args = self.query._sqlAndArgs('SELECT DISTINCT', target)
    # Wrap the DISTINCT selection so the row count is computed in SQL.
    sql = 'SELECT COUNT(*) FROM (' + sql + ')'
    result = self.query.store.querySQL(sql, args)
    assert len(result) == 1, 'more than one result: %r' % (result,)
    # COUNT may come back falsy (e.g. None); normalize to 0.
    return result[0][0] or 0
|
def human_id(self):
    """Subclasses may override this to provide a pretty ID which can be used
    for bash completion.

    Returns a slug of the instance's name attribute, or None when the
    instance has no name of its own or human IDs are disabled.
    """
    # Only instance-level names count — class attributes are ignored.
    has_own_name = self.NAME_ATTR in self.__dict__
    if has_own_name and self.HUMAN_ID:
        return utils.to_slug(getattr(self, self.NAME_ATTR))
    return None
|
def legend_aesthetics(self, layer, plot):
    """Return the aesthetics that contribute to the legend.

    Parameters
    ----------
    layer : Layer
        Layer whose legend is to be drawn
    plot : ggplot
        Plot object

    Returns
    -------
    matched : list
        Names of the aesthetics that contribute to the legend.
    """
    lyr = layer
    # The 'label' column is legend bookkeeping, not an aesthetic.
    legend_ae = set(self.key.columns) - {'label'}
    inherited = plot.mapping if lyr.inherit_aes else set()
    all_ae = lyr.mapping.keys() | inherited | lyr.stat.DEFAULT_AES.keys()
    geom_ae = lyr.geom.REQUIRED_AES | lyr.geom.DEFAULT_AES.keys()
    # Aesthetics set directly as geom parameters never appear in the legend.
    matched = (all_ae & geom_ae & legend_ae) - set(lyr.geom.aes_params)
    return list(matched)
|
def get_json_report_object(self, key):
    """Retrieve a JSON report object of the report.

    :param key: The key of the report object
    :return: The deserialized JSON report object.
    """
    connection = ConnectionManager().get_connection(self._connection_alias)
    # The stored value is already a full URL, so the base URL is not prepended.
    report_url = self.json_report_objects[key]
    return connection.get_json(report_url, append_base_url=False)
|
def get(self: 'Option[Mapping[K,V]]', key: K, default=None) -> 'Option[V]':
    """Gets a mapping value by key in the contained value or returns
    ``default`` if the key doesn't exist.

    Args:
        key: The mapping key.
        default: The default value.

    Returns:
        * ``Some`` variant of the mapping value if the key exists
          and the value is not None.
        * ``Some(default)`` if ``default`` is not None.
        * :py:data:`NONE` if ``default`` is None.

    Examples:
        >>> Some({'hi': 1}).get('hi')
        Some(1)
        >>> Some({}).get('hi', 12)
        Some(12)
        >>> NONE.get('hi', 12)
        Some(12)
        >>> NONE.get('hi')
        NONE
    """
    # NONE short-circuits straight to the default.
    if not self._is_some:
        return self._type.maybe(default)
    # maybe() wraps non-None into Some and None into NONE.
    return self._type.maybe(self._val.get(key, default))
|
def get_activities_by_ids(self, activity_ids):
    """Gets an ``ActivityList`` corresponding to the given ``IdList``.

    In plenary mode, the returned list contains all of the activities
    specified in the ``Id`` list, in the order of the list, including
    duplicates, or an error results if an ``Id`` in the supplied list is
    not found or inaccessible. Otherwise, inaccessible ``Activities`` may
    be omitted from the list and may present the elements in any order
    including returning a unique set.

    arg:    activity_ids (osid.id.IdList): the list of ``Ids`` to retrieve
    return: (osid.learning.ActivityList) - the returned ``Activity`` list
    raise:  NotFound - an ``Id was`` not found
    raise:  NullArgument - ``activity_ids`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceLookupSession.get_resources_by_ids
    # NOTE: This implementation currently ignores plenary view
    collection = JSONClientValidated('learning', collection='Activity', runtime=self._runtime)
    object_id_list = []
    for i in activity_ids:
        object_id_list.append(ObjectId(self._get_id(i, 'learning').get_identifier()))
    result = collection.find(dict({'_id': {'$in': object_id_list}}, **self._view_filter()))
    result = list(result)
    # Mongo's $in does not preserve the requested order; re-impose it here.
    sorted_result = []
    for object_id in object_id_list:
        for object_map in result:
            if object_map['_id'] == object_id:
                sorted_result.append(object_map)
                break
    return objects.ActivityList(sorted_result, runtime=self._runtime, proxy=self._proxy)
|
def register(self):
    """Proxy method to register the device with the parent.

    Idempotent: a device that is already registered is left untouched.
    """
    if self.registered:
        return
    self.registered = True
    if self.parent:
        self.parent.register(self)
|
def replace_persistent_volume_status(self, name, body, **kwargs):  # noqa: E501
    """replace_persistent_volume_status  # noqa: E501

    Replace status of the specified PersistentVolume.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_persistent_volume_status(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PersistentVolume (required)
    :param V1PersistentVolume body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1PersistentVolume
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same *_with_http_info
    # helper; when async_req is set it returns the request thread instead
    # of the response data.
    return self.replace_persistent_volume_status_with_http_info(name, body, **kwargs)  # noqa: E501
|
def install_packages(packages, what_for='for a complete setup to work properly'):
    '''Try to install .deb packages given by list.

    Return True, if packages could be installed or are installed already, or if
    they cannot be installed but the user gives feedback to continue.
    Else return False (or exit if the user aborts).

    :param packages: list of .deb package names to ensure installed
    :param what_for: phrase inserted into user-facing messages
    '''
    res = True
    non_installed_packages = _non_installed(packages)
    packages_str = ' '.join(non_installed_packages)
    if non_installed_packages:
        with quiet():
            dpkg = _has_dpkg()
        hint = ' (You may have to install them manually)'
        do_install = False
        go_on = True
        if dpkg:
            if _is_sudoer('Want to install dpkg packages'):
                do_install = True
            else:
                # Fixed: this was `do_install is False`, a no-op identity
                # comparison instead of an assignment.  Cannot install anything.
                do_install = False
                info = yellow(' '.join(['This deb packages are missing to be installed', flo("{what_for}: "), ', '.join(non_installed_packages), ]))
                question = ' Continue anyway?'
                go_on = query_yes_no(info + hint + question, default='no')
        else:
            # dpkg == False, unable to determine if packages are installed;
            # cannot install anything.
            do_install = False
            info = yellow(' '.join([flo('Required {what_for}: '), ', '.join(non_installed_packages), ]))
            go_on = query_yes_no(info + hint + ' Continue?', default='yes')
        if not go_on:
            sys.exit('Abort')
        if do_install:
            command = flo('sudo apt-get install {packages_str}')
            res = run(command).return_code == 0
    return res
|
def nl_msg_in_handler_debug(msg, arg):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/handlers.c#L114.

    Dump an incoming Netlink message through `arg` (or the module logger
    when `arg` is falsy) and continue processing.
    """
    writer = arg or _LOGGER.debug
    writer('-- Debug: Received Message:')
    nl_msg_dump(msg, writer)
    return NL_OK
|
def set_color_zones(self, start_index, end_index, color, duration=0, apply=1, callb=None, rapid=False):
    """Convenience method to set the colour status zone of the device.

    This method will send a MultiZoneSetColorZones message to the device, and
    request callb be executed when an ACK is received. The default callback
    will simply cache the value.

    :param start_index: Index of the start of the zone of interest
    :type start_index: int
    :param end_index: Index of the end of the zone of interest
    :type end_index: int
    :param color: The new state; must have exactly 4 components
        (Hue, Saturation, Brightness, Kelvin) or the call is a no-op
    :param duration: The duration, in seconds, of the power state transition
    :type duration: int
    :param apply: Indicates if the colour change is to be applied or memorized. Default: 1
    :type apply: int
    :param callb: Callable to be used when the response is received. If not
        set, self.resp_set_label will be used.
    :type callb: callable
    :param rapid: Whether to ask for ack (False) or not (True). Default False
    :type rapid: bool
    :returns: None
    :rtype: None
    """
    # Silently ignore malformed colours: anything without 4 components.
    if len(color) == 4:
        args = {"start_index": start_index, "end_index": end_index, "color": color, "duration": duration, "apply": apply, }
        mypartial = partial(self.resp_set_multizonemultizone, args=args)
        if callb:
            # Run both the internal state updater and the user callback.
            mycallb = lambda x, y: (mypartial(y), callb(x, y))
        else:
            mycallb = lambda x, y: mypartial(y)
        if rapid:
            # No ACK requested: fire the message and invoke the callback now.
            self.fire_and_forget(MultiZoneSetColorZones, args, num_repeats=1)
            mycallb(self, None)
        else:
            self.req_with_ack(MultiZoneSetColorZones, args, callb=mycallb)
|
def merge(move, output_dir, sources):
    """Merge multiple results folders into one, by copying the results over to
    a new folder.

    For a faster operation (which on the other hand destroys the campaign data
    if interrupted), the move option can be used to directly move results to
    the new folder.

    :param move: if truthy, move results (and delete the sources) instead of copying
    :param output_dir: path of the merged results folder to create
    :param sources: list of existing campaign folders to merge
    """
    # Get paths for all campaign JSONs
    jsons = []
    for s in sources:
        filename = "%s.json" % os.path.split(s)[1]
        jsons += [os.path.join(s, filename)]
    # Check that the configuration for all campaigns is the same
    reference_config = TinyDB(jsons[0]).table('config')
    for j in jsons[1:]:
        # Fixed: the inner loop used to rebind `j` as its second target,
        # shadowing the outer loop variable.
        for ref_entry, other_entry in zip(reference_config.all(), TinyDB(j).table('config').all()):
            assert ref_entry == other_entry
    # Create folders for new results directory
    filename = "%s.json" % os.path.split(output_dir)[1]
    output_json = os.path.join(output_dir, filename)
    output_data = os.path.join(output_dir, 'data')
    os.makedirs(output_data)
    # Create new database
    db = TinyDB(output_json)
    db.table('config').insert_multiple(reference_config.all())
    # Import results from all databases to the new JSON file
    for s in sources:
        filename = "%s.json" % os.path.split(s)[1]
        current_db = TinyDB(os.path.join(s, filename))
        db.table('results').insert_multiple(current_db.table('results').all())
    # Copy or move results to new data folder
    for s in sources:
        for r in glob.glob(os.path.join(s, 'data/*')):
            basename = os.path.basename(r)
            if move:
                shutil.move(r, os.path.join(output_data, basename))
            else:
                shutil.copytree(r, os.path.join(output_data, basename))
    if move:
        # Fixed: rmtree() does not expand glob patterns ('data/*') and
        # cannot delete a plain file (the campaign JSON) — both calls
        # raised.  Removing each source folder recursively covers the
        # now-empty data dir and the JSON file in one call.
        for s in sources:
            shutil.rmtree(s)
|
def sort_matches(matches):
    '''Sorts a ``list`` of matches best to worst.'''
    # Match-type weights: exact beats filename beats fuzzy beats fragment.
    weights = {'exact': 10 ** 5, 'fname': 10 ** 4, 'fuzzy': 10 ** 2, 'fuzzy_fragment': 1}
    def score(match):
        # A missing/zero amount counts as a single occurrence.
        return weights[match.type] * (match.amount if match.amount else 1)
    scored = [(score(match), match) for match in matches]
    scored.sort(reverse=True)
    return [match for _, match in scored]
|
def set_footer(self, *, text=EmptyEmbed, icon_url=EmptyEmbed):
    """Sets the footer for the embed content.

    This function returns the class instance to allow for fluent-style
    chaining.

    Parameters
    ----------
    text: :class:`str`
        The footer text.
    icon_url: :class:`str`
        The URL of the footer icon. Only HTTP(S) is supported.
    """
    footer = {}
    # Only fields explicitly provided (i.e. not the EmptyEmbed sentinel)
    # are stored, and they are coerced to strings.
    for field, value in (('text', text), ('icon_url', icon_url)):
        if value is not EmptyEmbed:
            footer[field] = str(value)
    self._footer = footer
    return self
|
def django_main(server_getter):
    """Call this within `__main__` to start the service as a standalone server
    with Django support. Your server should have `use_django=True`. If it does
    not, see `simple_main`, instead.

    :param server_getter: A callable that returns the service's `Server` class
        (not an instance of it). Your service code should not be imported until
        the `server_getter` callable is called, otherwise Django errors will
        occur.
    """
    import os
    # noinspection PyUnresolvedReferences,PyPackageRequirements
    import django
    parser = _get_arg_parser()
    parser.add_argument(
        '-s', '--settings',
        help='The settings module to use (must be importable)',
        required='DJANGO_SETTINGS_MODULE' not in os.environ,  # if env var does not exist, this argument is required
    )
    args = _get_args(parser)
    if args.settings:
        os.environ['DJANGO_SETTINGS_MODULE'] = args.settings
    warn_about_logging = False
    try:
        # We have to import it manually, because we need to manipulate the settings before setup() is called, but we
        # can't import django.conf.settings until after setup() is called.
        django_settings = importlib.import_module(os.environ['DJANGO_SETTINGS_MODULE'])
        if (getattr(django_settings, 'LOGGING', None) and
                django_settings.LOGGING != django_settings.SOA_SERVER_SETTINGS['logging']):
            # Warn later, after setup(), when logging is actually configured.
            warn_about_logging = True
            django_settings.LOGGING = django_settings.SOA_SERVER_SETTINGS['logging']
    except ImportError:
        raise ValueError('Cannot import Django settings module `{}`.'.format(os.environ['DJANGO_SETTINGS_MODULE']))
    except AttributeError:
        raise ValueError('Cannot find `SOA_SERVER_SETTINGS` in the Django settings module.')
    except KeyError:
        raise ValueError(
            "Cannot configure Django `LOGGING` setting because no setting `SOA_SERVER_SETTINGS['logging']` was found.",
        )
    if django.VERSION >= (1, 7):
        django.setup()
    if warn_about_logging:
        logging.warning(
            "Django setting `LOGGING` differs from `SOA_SERVER_SETTINGS['logging']` and has been overwritten with "
            "the value of `SOA_SERVER_SETTINGS['logging']`."
        )
    _run_server_reloader_wrapper(args, server_getter())
|
def _connect(self, target, listen, udp, ipv6, retry):
    """Takes target/listen/udp/ipv6 and sets self.sock and self.peer.

    :param target: address to bind to (listen mode) or connect to
    :param listen: act as a server (bind/accept) instead of a client
    :param udp: use a datagram socket instead of a stream socket
    :param ipv6: use AF_INET6 instead of AF_INET
    :param retry: in client mode, retry indefinitely on transient socket errors
    """
    ty = socket.SOCK_DGRAM if udp else socket.SOCK_STREAM
    fam = socket.AF_INET6 if ipv6 else socket.AF_INET
    self.sock = socket.socket(fam, ty)
    if listen:
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind(target)
        if not udp:
            # TCP: accept one connection and use it as our socket.
            self.sock.listen(1)
            conn, addr = self.sock.accept()
            self.sock.close()
            self.sock = conn
            self.peer = addr
        else:
            # UDP: the first datagram identifies the peer; keep its payload
            # buffered and lock the socket onto that peer.
            self.buf, self.peer = self.sock.recvfrom(1024)
            self.sock.connect(self.peer)
            self._log_recv(self.buf, False)
        if self.verbose:
            self._print_verbose('Connection from %s accepted' % str(self.peer))
    else:
        while True:
            try:
                self.sock.connect(target)
            except (socket.gaierror, socket.herror) as exc:
                # Name-resolution errors are never retried.
                raise NetcatError('Could not connect to %r: %r' % (target, exc))
            except socket.error as exc:
                if retry:
                    time.sleep(0.2)
                else:
                    raise NetcatError('Could not connect to %r: %r' % (target, exc))
            else:
                break
        self.peer = target
|
def findAndHandleDeadJobs(cls, nodeInfo, batchSystemShutdown=False):
    """Look at the state of all jobs registered in the individual job state
    files, and handle them (clean up the disk, and run any registered defer
    functions).

    :param str nodeInfo: The location of the workflow directory on the node.
    :param bool batchSystemShutdown: Is the batch system in the process of shutting down?
    :return:
    """
    # A list of tuples of (job name, pid or process running job, registered defer functions)
    for jobState in cls._getAllJobStates(nodeInfo):
        if not cls._pidExists(jobState['jobPID']):
            # using same logic to prevent races as CachingFileStore._setupCache
            myPID = str(os.getpid())
            cleanupFile = os.path.join(jobState['jobDir'], '.cleanup')
            # Advertise ourselves as a cleanup candidate via a PID-named file.
            with open(os.path.join(jobState['jobDir'], '.' + myPID), 'w') as f:
                f.write(myPID)
            while True:
                try:
                    # rename() is the atomic claim on the cleanup role.
                    os.rename(f.name, cleanupFile)
                except OSError as err:
                    if err.errno == errno.ENOTEMPTY:
                        # Someone else holds the cleanup claim.
                        with open(cleanupFile, 'r') as f:
                            cleanupPID = f.read()
                        if cls._pidExists(int(cleanupPID)):
                            # Cleanup your own mess. It's only polite.
                            os.remove(f.name)
                            break
                        else:
                            # The previous claimant died; clear its claim and retry.
                            os.remove(cleanupFile)
                            continue
                    else:
                        raise
                else:
                    logger.warning('Detected that job (%s) prematurely terminated. Fixing the '
                                   'state of the job on disk.', jobState['jobName'])
                    if not batchSystemShutdown:
                        logger.debug("Deleting the stale working directory.")
                        # Delete the old work directory if it still exists. Do this only during
                        # the life of the program and dont' do it during the batch system
                        # cleanup. Leave that to the batch system cleanup code.
                        shutil.rmtree(jobState['jobDir'])
                    # Run any deferred functions associated with the job
                    logger.debug('Running user-defined deferred functions.')
                    cls._runDeferredFunctions(jobState['deferredFunctions'])
                    break
|
def get_stats(self):
    """Get received/dropped statistics from the BPF device.

    Returns a (received, dropped) tuple, or (None, None) when the ioctl fails.
    """
    zeroed = struct.pack("2I", 0, 0)
    try:
        raw = fcntl.ioctl(self.ins, BIOCGSTATS, zeroed)
    except IOError:
        warning("Unable to get stats from BPF !")
        return (None, None)
    return struct.unpack("2I", raw)
|
def BinarySigmoid(self, func):
    '''Currently, caffe2 does not support this function.

    Emitted as HardSigmoid with alpha=1.0, beta=0.0 instead.
    '''
    node = onnx.helper.make_node(
        'HardSigmoid',
        func.input,
        func.output,
        alpha=1.0,
        beta=0.0,
    )
    return [node]
|
def list_pkgs(*packages, **kwargs):
    '''List the packages currently installed in a dict::

        {'<package_name>': '<version>'}

    External dependencies::

        Virtual package resolution requires aptitude. Because this function
        uses dpkg, virtual packages will be reported as not installed.

    CLI Example:

    .. code-block:: bash

        salt '*' lowpkg.list_pkgs
        salt '*' lowpkg.list_pkgs httpd
    '''
    cmd = 'dpkg -l {0}'.format(' '.join(packages))
    out = __salt__['cmd.run_all'](cmd, python_shell=False)
    if out['retcode'] != 0:
        # On failure the error string itself is returned (salt convention here).
        msg = 'Error: ' + out['stderr']
        log.error(msg)
        return msg
    installed = {}
    for line in out['stdout'].splitlines():
        # 'ii ' marks a properly installed package in dpkg -l output.
        if not line.startswith('ii '):
            continue
        fields = line.split()
        installed[fields[1]] = fields[2]
    return installed
|
def hash_file(filepath: str) -> str:
    """Return the hexdigest MD5 hash of the content of the file at `filepath`."""
    digest = hashlib.md5()
    # acc_hash streams the file content into the digest object.
    acc_hash(filepath, digest)
    return digest.hexdigest()
|
def entrez_sets_of_results(url, retstart=False, retmax=False, count=False) -> 'Iterator[requests.Response]':
    """Gets sets of results back from Entrez.

    Entrez can only return 500 results at a time. This creates a generator
    that gets results by incrementing retstart and retmax.

    Parameters
    ----------
    url : str
        The Entrez API url to use.
    retstart : int
        Return values starting at this index.
    retmax : int
        Return at most this number of values.
    count : int
        The number of results returned by EQuery.

    Yields
    ------
    requests.Response
        One response per batch; iteration stops early if a request fails.
    """
    # Fixed: the return annotation claimed Optional[List[requests.Response]],
    # but this function is a generator — it yields an iterator of responses.
    # Falsy (absent/0) arguments fall back to defaults, preserving the
    # original convention of passing False for "not set".
    if not retstart:
        retstart = 0
    if not retmax:
        retmax = 500
    if not count:
        count = retmax
    retmax = 500
    # Entrez can return a max of 500
    while retstart < count:
        # Shrink the final window so we never request past `count`.
        diff = count - retstart
        if diff < 500:
            retmax = diff
        _url = url + f'&retstart={retstart}&retmax={retmax}'
        resp = entrez_try_get_multiple_times(_url)
        if resp is None:
            # The request ultimately failed; stop yielding.
            return
        retstart += retmax
        yield resp
|
def get_count(self, instance):
    """Haystack facets are returned as a two-tuple (value, count).

    The count field should contain the faceted count.
    """
    count = instance[1]
    field = serializers.IntegerField(read_only=True)
    return field.to_representation(count)
|
def listens_to(name, sender=None, weak=True):
    """Decorator: connect the wrapped function to the named signal."""
    def decorator(func):
        sig = signal(name)
        # Only pass `sender` through when one was given, so the signal
        # library's own "any sender" default applies otherwise.
        if sender:
            return sig.connect(func, sender=sender, weak=weak)
        return sig.connect(func, weak=weak)
    return decorator
|
def encrypt_password(self):
    """Hash the stored password in place unless it is already a pbkdf2 hash."""
    password = self.password
    # Empty/None passwords and values already carrying the '$pbkdf2'
    # prefix are left untouched.
    if not password or password.startswith('$pbkdf2'):
        return
    self.set_password(password)
|
def load_yaml(filepath):
    """Convenience function for loading yaml-encoded data from disk.

    NOTE(review): bare yaml.load without an explicit Loader can construct
    arbitrary Python objects — consider yaml.safe_load if the file may be
    untrusted.
    """
    with open(filepath) as handle:
        return yaml.load(handle.read())
|
def stop(self, failover=False):
    """Stops the scheduler driver.

    If the 'failover' flag is set to False then it is expected that this
    framework will never reconnect to Mesos and all of its executors and
    tasks can be terminated. Otherwise, all executors and tasks will remain
    running (for some framework-specific failover timeout) allowing the
    scheduler to reconnect (possibly in the same process, or from a
    different process, for example, on a different machine).
    """
    logging.info('Stops Scheduler Driver')
    driver = self.driver
    return driver.stop(failover)
|
def iteration(self, node_status=True):
    """Execute a single model iteration.

    :param node_status: if True, include the per-node status mapping in the result
    :return: Iteration_id, Incremental node status (dictionary node->status)
    """
    self.clean_initial_status(self.available_statuses.values())
    # Work on a snapshot so updates in this iteration are based on the
    # previous iteration's state.
    actual_status = {node: nstatus for node, nstatus in future.utils.iteritems(self.status)}
    # streaming
    if self.stream_execution:
        raise ValueError("Streaming network not allowed.")
    # snapshot
    else:
        # Iteration 0 only reports the initial configuration.
        if self.actual_iteration == 0:
            self.actual_iteration += 1
            delta, node_count, status_delta = self.status_delta(actual_status)
            if node_status:
                return {"iteration": 0, "status": actual_status.copy(), "node_count": node_count.copy(), "status_delta": status_delta.copy()}
            else:
                return {"iteration": 0, "status": {}, "node_count": node_count.copy(), "status_delta": status_delta.copy()}
        for u in self.graph.nodes():
            # Only nodes with status 0 may change state this iteration.
            if actual_status[u] != 0:
                continue
            # Spontaneous adoption with probability adopter_rate.
            if self.params['model']['adopter_rate'] > 0:
                xk = (0, 1)
                pk = (1 - self.params['model']['adopter_rate'], self.params['model']['adopter_rate'])
                probability = stats.rv_discrete(name='probability', values=(xk, pk))
                number_probability = probability.rvs()
                if number_probability == 1:
                    actual_status[u] = 1
                    continue
            neighbors = list(self.graph.neighbors(u))
            # For directed graphs, influence flows along incoming edges.
            if isinstance(self.graph, nx.DiGraph):
                neighbors = list(self.graph.predecessors(u))
            infected = 0
            for v in neighbors:
                infected += self.status[v]
            if infected > 0 and actual_status[u] == 0:
                infected_ratio = float(infected) / len(neighbors)
                # Threshold reached: node draws against its profile value.
                if infected_ratio >= self.params['nodes']['threshold'][u]:
                    eventp = np.random.random_sample()
                    if eventp >= self.params['nodes']['profile'][u]:
                        actual_status[u] = 1
                    else:
                        if self.params['model']['blocked'] != 0:
                            blip = np.random.random_sample()
                            if blip > self.params['model']['blocked']:
                                # Node becomes blocked (-1) instead of adopting.
                                actual_status[u] = -1
        delta, node_count, status_delta = self.status_delta(actual_status)
        self.status = actual_status
        self.actual_iteration += 1
        if node_status:
            return {"iteration": self.actual_iteration - 1, "status": delta.copy(), "node_count": node_count.copy(), "status_delta": status_delta.copy()}
        else:
            return {"iteration": self.actual_iteration - 1, "status": {}, "node_count": node_count.copy(), "status_delta": status_delta.copy()}
|
def make_list(obj, cast=True):
    """Return *obj* as a list.

    A list input is shallow-copied, and lazy iterables are materialized.
    Tuples and sets are converted only when *cast* is true; any other
    object (including tuples/sets when *cast* is false) is wrapped in a
    new single-element list.
    """
    if isinstance(obj, list):
        return list(obj)
    if is_lazy_iterable(obj):
        return list(obj)
    if cast and isinstance(obj, (tuple, set)):
        return list(obj)
    return [obj]
|
def weight_list_to_tuple(data, attr_name):
    '''Converts a list of values and corresponding weights to a tuple of values'''
    values = data['Value']
    weights = data['Weight']
    # Every value needs exactly one weight.
    if len(values) != len(weights):
        raise ValueError('Number of weights do not correspond to number of '
                         'attributes in %s' % attr_name)
    weight = np.array(weights)
    # Weights must form a probability distribution (within tolerance).
    if fabs(np.sum(weight) - 1.) > 1E-7:
        raise ValueError('Weights do not sum to 1.0 in %s' % attr_name)
    return [(value, weight[iloc]) for iloc, value in enumerate(values)]
|
def _get_funcs(self):
    """Returns a 32-bit value stating supported I2C functions.

    :rtype: int
    """
    # The kernel fills the c_uint32 buffer through the I2C_FUNCS ioctl.
    funcs = c_uint32()
    ioctl(self.fd, I2C_FUNCS, funcs)
    return funcs.value
|
def get_authuser_by_userid(cls, request):
    """Get user by ID.

    Used by Ticket-based auth. Is added as request method to populate
    `request.user`.
    """
    user_id = authenticated_userid(request)
    if user_id:
        # Populate/cache request._user for this user id.
        cache_request_user(cls, request, user_id)
    return request._user
|
def normalize(self, inplace=True):
    """Normalize the pdf of the continuous factor so that it integrates
    to 1 over all of its variables.

    Parameters
    ----------
    inplace : boolean
        If inplace=True it will modify the factor itself, else would
        return a new factor.

    Returns
    -------
    ContinuousFactor or None :
        if inplace=True (default) returns None
        if inplace=False returns a new ContinuousFactor instance.

    Examples
    --------
    >>> from pgmpy.factors.continuous import ContinuousFactor
    >>> from scipy.stats import multivariate_normal
    >>> std_normal_pdf = lambda x: 2 * multivariate_normal.pdf(x, [0, 0], [[1, 0], [0, 1]])
    >>> std_normal = ContinuousFactor(['x1', 'x2'], std_normal_pdf)
    >>> std_normal.assignment(1, 1)
    0.117099663049
    >>> std_normal.normalize()
    >>> std_normal.assignment(1, 1)
    0.0585498315243
    """
    phi = self if inplace else self.copy()
    # BUG FIX: the normalized distribution was previously assigned to the
    # misspelled attribute ``phi.distriution``, leaving ``phi.distribution``
    # unchanged (normalize() silently had no effect).
    phi.distribution = phi.distribution.normalize(inplace=False)
    if not inplace:
        return phi
|
def get_repository_hierarchy_design_session(self, proxy):
    """Gets the repository hierarchy design session.

    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.repository.RepositoryHierarchyDesignSession) - a
            RepostoryHierarchyDesignSession
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - supports_repository_hierarchy_design()
            is false
    compliance: optional - This method must be implemented if
            supports_repository_hierarchy_design() is true.
    """
    if not self.supports_repository_hierarchy_design():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise  # OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        return sessions.RepositoryHierarchyDesignSession(
            proxy, runtime=self._runtime)
    except AttributeError:
        raise  # OperationFailed()
|
def draw_capitan_score_bitmap(self, export_path: ExportPath) -> None:
    """Draws the 30x30 symbol into the given file.

    :param export_path: The path, where the symbols should be created on disk
    """
    destination = export_path.get_full_path()
    # Grayscale ('L') image built directly from the stored pixel array.
    with Image.fromarray(self.image_data, mode='L') as image:
        image.save(destination)
|
def _ParseRecord(self, parser_mediator, page_data, record_offset):
    """Parses a record from the page data.

    Args:
      parser_mediator (ParserMediator): parser mediator.
      page_data (bytes): page data.
      record_offset (int): offset of the record relative to the start
          of the page.

    Raises:
      ParseError: when the record cannot be parsed.
    """
    record_header_map = self._GetDataTypeMap('binarycookies_record_header')

    try:
        record_header = self._ReadStructureFromByteStream(
            page_data[record_offset:], record_offset, record_header_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.ParseError((
            'Unable to map record header data at offset: 0x{0:08x} with error: '
            '{1!s}').format(record_offset, exception))

    event_data = SafariBinaryCookieEventData()
    event_data.flags = record_header.flags

    # The string offsets in the header are relative to the start of the
    # record; a zero offset means the field is absent.
    if record_header.url_offset:
        data_offset = record_offset + record_header.url_offset
        event_data.url = self._ParseCString(page_data, data_offset)

    if record_header.name_offset:
        data_offset = record_offset + record_header.name_offset
        event_data.cookie_name = self._ParseCString(page_data, data_offset)

    if record_header.path_offset:
        data_offset = record_offset + record_header.path_offset
        event_data.path = self._ParseCString(page_data, data_offset)

    if record_header.value_offset:
        data_offset = record_offset + record_header.value_offset
        event_data.cookie_value = self._ParseCString(page_data, data_offset)

    # A creation event is only produced for a non-zero timestamp, while an
    # expiration event is always produced (with a semantic 'Not set' time
    # when the timestamp is zero).
    if record_header.creation_time:
        date_time = dfdatetime_cocoa_time.CocoaTime(
            timestamp=record_header.creation_time)
        event = time_events.DateTimeValuesEvent(
            date_time, definitions.TIME_DESCRIPTION_CREATION)
        parser_mediator.ProduceEventWithEventData(event, event_data)

    if record_header.expiration_time:
        date_time = dfdatetime_cocoa_time.CocoaTime(
            timestamp=record_header.expiration_time)
    else:
        date_time = dfdatetime_semantic_time.SemanticTime('Not set')

    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
    parser_mediator.ProduceEventWithEventData(event, event_data)

    # Hand the cookie to the plugin registered for this cookie name, if any.
    for plugin in self._cookie_plugins:
        if parser_mediator.abort:
            break

        if event_data.cookie_name != plugin.COOKIE_NAME:
            continue

        try:
            plugin.UpdateChainAndProcess(
                parser_mediator, cookie_name=event_data.cookie_name,
                cookie_data=event_data.cookie_value, url=event_data.url)
        except Exception as exception:  # pylint: disable=broad-except
            # Plugin failures degrade to a warning so one bad cookie does
            # not abort parsing the rest of the page.
            parser_mediator.ProduceExtractionWarning(
                'plugin: {0:s} unable to parse cookie with error: {1!s}'.format(
                    plugin.NAME, exception))
|
def users_set_preferences(self, user_id, data, **kwargs):
    """Set user's preferences."""
    payload = {'userId': user_id, 'data': data, 'kwargs': kwargs}
    return self.__call_api_post('users.setPreferences', **payload)
|
def _write_comid_lat_lon_z(self):
    """Add latitude, longitude, and z values for each netCDF feature.

    Remarks:
        Lookup table is a CSV file with COMID, Lat, Lon,
        and Elev_m columns.
        Columns must be in that order and these must be the first
        four columns.
    """
    # only add if user adds
    if self.comid_lat_lon_z_file and os.path.exists(self.comid_lat_lon_z_file):
        # get list of COMIDS
        lookup_table = csv_to_list(self.comid_lat_lon_z_file)
        lookup_comids = np.array([int(float(row[0]))
                                  for row in lookup_table[1:]])

        # Get relevant arrays while we update them
        nc_comids = self.cf_nc.variables[self.output_id_dim_name][:]
        lats = self.cf_nc.variables['lat'][:]
        lons = self.cf_nc.variables['lon'][:]
        zs = self.cf_nc.variables['z'][:]

        # Running extrema, used below for the geospatial_* metadata.
        min_lat = None
        max_lat = None
        min_lon = None
        max_lon = None
        z_min = None
        z_max = None

        # Process each row in the lookup table
        for nc_index, nc_comid in enumerate(nc_comids):
            try:
                # +1 compensates for the header row sliced off of
                # lookup_comids above.
                lookup_index = np.where(lookup_comids == nc_comid)[0][0] + 1
            except IndexError:
                log('rivid %s missing in comid_lat_lon_z file' % nc_comid,
                    'ERROR')
            # NOTE(review): if log(..., 'ERROR') returns instead of
            # raising, execution falls through here with lookup_index
            # unset (or stale from a previous iteration) -- confirm that
            # 'ERROR' severity raises.

            lat = float(lookup_table[lookup_index][1])
            lats[nc_index] = lat
            if min_lat is None or lat < min_lat:
                min_lat = lat
            if max_lat is None or lat > max_lat:
                max_lat = lat

            lon = float(lookup_table[lookup_index][2])
            lons[nc_index] = lon
            if min_lon is None or lon < min_lon:
                min_lon = lon
            if max_lon is None or lon > max_lon:
                max_lon = lon

            z = float(lookup_table[lookup_index][3])
            zs[nc_index] = z
            if z_min is None or z < z_min:
                z_min = z
            if z_max is None or z > z_max:
                z_max = z

        # Overwrite netCDF variable values
        self.cf_nc.variables['lat'][:] = lats
        self.cf_nc.variables['lon'][:] = lons
        self.cf_nc.variables['z'][:] = zs

        # Update metadata
        if min_lat is not None:
            self.cf_nc.geospatial_lat_min = min_lat
        if max_lat is not None:
            self.cf_nc.geospatial_lat_max = max_lat
        if min_lon is not None:
            self.cf_nc.geospatial_lon_min = min_lon
        if max_lon is not None:
            self.cf_nc.geospatial_lon_max = max_lon
        if z_min is not None:
            self.cf_nc.geospatial_vertical_min = z_min
        if z_max is not None:
            self.cf_nc.geospatial_vertical_max = z_max
    else:
        log('No comid_lat_lon_z file. Not adding values ...', 'INFO')
|
def copy_meta_from(self, ido):
    """Copies vtki meta data onto this object from another object."""
    self._active_scalar_info = ido.active_scalar_info
    self._active_vectors_info = ido.active_vectors_info
    # Textures are copied only when the source actually carries them.
    _missing = object()
    textures = getattr(ido, '_textures', _missing)
    if textures is not _missing:
        self._textures = textures
|
def _get_attribute_value_of(resource, attribute_name, default=None):
    """Gets the value of attribute_name from the resource.

    It catches the exception, if any, while retrieving the
    value of attribute_name from resource and returns default.

    :param resource: The resource object
    :param attribute_name: Property of the resource
    :param default: Value to return when the attribute cannot be read
        (defaults to None).
    :returns: the property value if no error encountered, else ``default``.
    """
    try:
        return getattr(resource, attribute_name)
    except (sushy.exceptions.SushyError,
            exception.MissingAttributeError) as e:
        # Failures are logged at debug level and swallowed deliberately:
        # callers treat a missing attribute as best-effort.
        msg = (('The Redfish controller failed to get the '
                'attribute %(attribute)s from resource %(resource)s. '
                'Error %(error)s') % {'error': str(e),
                                      'attribute': attribute_name,
                                      'resource': resource.__class__.__name__})
        LOG.debug(msg)
        return default
|
def mutualReceptions(self, idA, idB):
    """Returns all pairs of dignities in mutual reception."""
    dignities_ab = self.receives(idA, idB)
    dignities_ba = self.receives(idB, idA)
    # Cartesian product of both dignity lists.
    pairs = []
    for a in dignities_ab:
        for b in dignities_ba:
            pairs.append((a, b))
    return pairs
|
def parents(self, node):
    """Determine all parents of node in our tree."""
    result = []
    # A parent without a 'tree' attribute defaults to our tree and so
    # is included; 'parents' missing means no parents at all.
    for parent in getattr(node, 'parents', []):
        if getattr(parent, 'tree', self.TREE) == self.TREE:
            result.append(parent)
    return result
|
def apply(self, function):
    """Applies a function on the value; the actual stored value will not change.

    :param function: (Function), a stateful serializable object which
        represents the Function defined on server side. This object must
        have a serializable Function counterpart registered on server side
        with the actual ``org.hazelcast.core.IFunction`` implementation.
    :return: (object), the result of the function application.
    """
    check_not_none(function, "function can't be None")
    function_data = self._to_data(function)
    return self._encode_invoke(atomic_reference_apply_codec,
                               function=function_data)
|
def __log_density_single(x, mean, covar):
    """Compute the log of the normal density at x for the given mean and
    covariance matrix.

    Note: this reference implementation is not efficient;
    _log_multivariate_density is recommended for real use.
    """
    n_dim = mean.shape[0]
    diff = x - mean
    covar_inv = scipy.linalg.inv(covar)
    covar_det = scipy.linalg.det(covar)
    # Mahalanobis term plus the log-normalization constant.
    mahalanobis = np.dot(np.dot(diff.T, covar_inv), diff)
    den = mahalanobis + n_dim * np.log(2 * np.pi) + np.log(covar_det)
    return -1 / 2 * den
|
def _get_request_content(self, message=None):
    '''Updates all messages in message with default message parameters.

    :param message: A collection of Postmark message data
    :type message: a collection of message `dict`s
    :rtype: JSON encoded `str`
    '''
    if not message:
        raise MessageError('No messages to send.')
    if len(message) > MAX_BATCH_MESSAGES:
        err = 'Maximum {0} messages allowed in batch'
        raise MessageError(err.format(MAX_BATCH_MESSAGES))
    # Normalize each entry to a full message object, then serialize.
    payload = [self._cast_message(message=msg).data() for msg in message]
    return json.dumps(payload, ensure_ascii=True)
|
def future_request(self, msg, timeout=None, use_mid=None):
    """Send a request message, with future replies.

    Parameters
    ----------
    msg : Message object
        The request Message to send.
    timeout : float in seconds
        How long to wait for a reply. The default is the
        timeout set when creating the AsyncClient.
    use_mid : boolean, optional
        Whether to use message IDs. Default is to use message IDs
        if the server supports them.

    Returns
    -------
    A tornado.concurrent.Future that resolves with:

    reply : Message object
        The reply message received.
    informs : list of Message objects
        A list of the inform messages received.
    """
    mid = self._get_mid_and_update_msg(msg, use_mid)

    if msg.name in self.request_handlers:
        # Dispatch to the locally-registered handler via a fake
        # connection that records any informs the handler sends.
        req = FakeClientRequestConnection(self.client_connection, msg)
        reply_msg = yield tornado.gen.maybe_future(
            self.request_handlers[msg.name](req, msg))
        reply_informs = req.informs_sent
    else:
        # No handler registered: fabricate a generic 'ok' reply.
        reply_msg = Message.reply(msg.name, 'ok')
        reply_informs = []

    reply_msg.mid = mid
    # Tornado generator-coroutine style return value.
    raise Return((reply_msg, reply_informs))
|
def _outer_to_numpy_indexer(key, shape):
    """Convert an OuterIndexer into an indexer for NumPy.

    Parameters
    ----------
    key : Basic/OuterIndexer
        An indexer to convert.
    shape : tuple
        Shape of the array subject to the indexing.

    Returns
    -------
    tuple
        Tuple suitable for use to index a NumPy array.
    """
    non_slices = [k for k in key.tuple if not isinstance(k, slice)]
    if len(non_slices) <= 1:
        # With at most one vector (everything else slices), the key can be
        # used directly in mixed basic/advanced indexing. Boolean indexes
        # should already have been converted to integer arrays.
        return key.tuple
    return _outer_to_vectorized_indexer(key, shape).tuple
|
def set_security_zones_activation(self, internal=True, external=True):
    """Arm or disarm the alarm system zones.

    Args:
        internal(bool): activates/deactivates the internal zone
        external(bool): activates/deactivates the external zone

    Examples:
        arming while being at home
        >>> home.set_security_zones_activation(False, True)
        arming without being at home
        >>> home.set_security_zones_activation(True, True)
        disarming the alarm system
        >>> home.set_security_zones_activation(False, False)
    """
    zones = {"EXTERNAL": external, "INTERNAL": internal}
    body = json.dumps({"zonesActivation": zones})
    return self._restCall("home/security/setZonesActivation", body)
|
def _make_absolute(self, link):
    """Makes a given link absolute."""
    # Parse the link with stdlib.
    parts = urlparse(link)._asdict()

    # Relative link: join it with base_url.
    if not parts['netloc']:
        return urljoin(self.base_url, link)

    # Absolute but scheme-less (e.g. ``//host/path``): borrow the scheme
    # from base_url and rebuild the URL.
    if not parts['scheme']:
        parts['scheme'] = urlparse(self.base_url).scheme
        return urlunparse(tuple(parts.values()))

    # Already absolute and complete with scheme; nothing to be done here.
    return link
|
def glob(self, pathname, ondisk=True, source=False, strings=False, exclude=None):
    """Returns a list of Nodes (or strings) matching a specified
    pathname pattern.

    Pathname patterns follow UNIX shell semantics: * matches
    any-length strings of any characters, ? matches any character,
    and [] can enclose lists or ranges of characters. Matches do
    not span directory separators.

    The matches take into account Repositories, returning local
    Nodes if a corresponding entry exists in a Repository (either
    an in-memory Node or something on disk).

    By default, the glob() function matches entries that exist
    on-disk, in addition to in-memory Nodes. Setting the "ondisk"
    argument to False (or some other non-true value) causes the glob()
    function to only match in-memory Nodes. The default behavior is
    to return both the on-disk and in-memory Nodes.

    The "source" argument, when true, specifies that corresponding
    source Nodes must be returned if you're globbing in a build
    directory (initialized with VariantDir()). The default behavior
    is to return Nodes local to the VariantDir().

    The "strings" argument, when true, returns the matches as strings,
    not Nodes. The strings are path names relative to this directory.

    The "exclude" argument, if not None, must be a pattern or a list
    of patterns following the same UNIX shell semantics.
    Elements matching at least one pattern of this list will be excluded
    from the result.

    The underlying algorithm is adapted from the glob.glob() function
    in the Python library (but heavily modified), and uses fnmatch()
    under the covers.
    """
    dirname, basename = os.path.split(pathname)
    if not dirname:
        # No directory part: match directly within this directory.
        result = self._glob1(basename, ondisk, source, strings)
    else:
        if has_glob_magic(dirname):
            # The directory part itself contains wildcards: expand it
            # recursively (as Nodes, hence strings=False).
            # NOTE: 'list' and 'dir' below shadow the builtins.
            list = self.glob(dirname, ondisk, source, False, exclude)
        else:
            list = [self.Dir(dirname, create=True)]
        result = []
        for dir in list:
            r = dir._glob1(basename, ondisk, source, strings)
            if strings:
                # Keep string results relative to this directory by
                # re-prefixing each match with its directory.
                r = [os.path.join(str(dir), x) for x in r]
            result.extend(r)
    if exclude:
        # Expand each exclude pattern the same way and drop anything in
        # the result that fnmatch-es one of the expanded exclusions.
        excludes = []
        excludeList = SCons.Util.flatten(exclude)
        for x in excludeList:
            r = self.glob(x, ondisk, source, strings)
            excludes.extend(r)
        result = [x for x in result
                  if not any(fnmatch.fnmatch(str(x), str(e))
                             for e in SCons.Util.flatten(excludes))]
    return sorted(result, key=lambda a: str(a))
|
def _walk(recursion):
    """Returns a recursive or non-recursive directory walker."""
    # Prefer the faster scandir-based walk when the backport is installed.
    try:
        from scandir import walk as walk_function
    except ImportError:
        from os import walk as walk_function
    if recursion:
        walk = partial(walk_function)
    else:
        def walk(path):  # pylint: disable=C0111
            # Non-recursive: yield only the first (top-level) entry of
            # the walk generator.
            try:
                yield next(walk_function(path))
            except NameError:
                # NOTE(review): walk_function is always bound by one of
                # the imports above, so this fallback looks unreachable
                # -- confirm the original intent before removing.
                yield walk_function(path)
    return walk
|
def calc_cost(self, node_a, node_b):
    """get the distance between current node and the neighbor (cost)"""
    dx = node_b.x - node_a.x
    dy = node_b.y - node_a.y
    # Straight moves cost 1; diagonal moves cost sqrt(2).
    step_cost = 1 if dx == 0 or dy == 0 else SQRT2
    # weight for weighted algorithms
    if self.weighted:
        step_cost *= node_b.weight
    return node_a.g + step_cost
|
def get_db_uri(config, output_dir):
    """Process results_database parameters in config to format them for
    the set-database function.

    :param dict config: project configuration dict
    :param str output_dir: output directory for results
    :return: string for db uri
    """
    db_config = config.get("results_database", {"db_uri": "default"})
    # Use .get() so a results_database section that omits 'db_uri' falls
    # back to the default path instead of raising KeyError.
    db_uri = db_config.get('db_uri', 'default')
    if db_uri == 'default':
        return os.path.join(output_dir, "results.sqlite")
    return db_uri
|
def cons(f, mindepth):
    """Makes a list of lists of reads at each site."""
    C = ClustFile(f)
    for data in C:
        # NOTE(review): 'data' is iterated twice (zip here and the loop
        # below) -- assumes ClustFile yields reusable sequences, not
        # one-shot generators; 'names' and 'seqs' are unused.
        names, seqs, nreps = zip(*data)
        total_nreps = sum(nreps)
        # Depth filter
        if total_nreps < mindepth:
            continue

        S = []
        for name, seq, nrep in data:
            # Append sequence * number of dereps
            S.append([seq, nrep])

        # Make list for each site in sequences
        res = stack(S)
        # Keep only the first four counts per site, at sites whose
        # summed coverage meets mindepth.
        yield [x[:4] for x in res if sum(x[:4]) >= mindepth]
|
def _getOpenChoices(self):
    """Go through all possible sites to find applicable .cfg files.
    Return as an iterable."""
    tsk = self._taskParsObj.getName()
    taskFiles = set()
    dirsSoFar = []  # this helps speed this up (skip unneeded globs)

    # last dir: the directory of the currently-loaded parameter file.
    aDir = os.path.dirname(self._taskParsObj.filename)
    if len(aDir) < 1:
        aDir = os.curdir
    dirsSoFar.append(aDir)
    taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))

    # current dir
    aDir = os.getcwd()
    if aDir not in dirsSoFar:
        dirsSoFar.append(aDir)
        taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))

    # task's python pkg dir (if tsk == python pkg name)
    try:
        x, pkgf = cfgpars.findCfgFileForPkg(
            tsk, '.cfg', taskName=tsk,
            pkgObj=self._taskParsObj.getAssocPkg())
        taskFiles.update((pkgf,))
    except cfgpars.NoCfgFileError:
        pass  # no big deal - maybe there is no python package

    # user's own resourceDir
    aDir = self._rcDir
    if aDir not in dirsSoFar:
        dirsSoFar.append(aDir)
        taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))

    # extra loc - see if they used the app's env. var
    aDir = dirsSoFar[0]  # flag to skip this if no env var found
    envVarName = APP_NAME.upper() + '_CFG'
    if envVarName in os.environ:
        aDir = os.environ[envVarName]
    if aDir not in dirsSoFar:
        dirsSoFar.append(aDir)
        taskFiles.update(cfgpars.getCfgFilesInDirForTask(aDir, tsk))

    # At the very end, add an option which we will later interpret to mean
    # to open the file dialog.
    taskFiles = list(taskFiles)  # so as to keep next item at end of seq
    taskFiles.sort()
    taskFiles.append("Other ...")
    return taskFiles
|
def get_sidecar_nodes(self) -> Iterator[PostSidecarNode]:
    """Sidecar nodes of a Post with typename == GraphSidecar."""
    if self.typename != 'GraphSidecar':
        return
    for edge in self._field('edge_sidecar_to_children', 'edges'):
        node = edge['node']
        has_video = node['is_video']
        yield PostSidecarNode(
            is_video=has_video,
            display_url=node['display_url'],
            video_url=node['video_url'] if has_video else None)
|
def scrape(self, selector, cleaner=None, processor=None):
    """Scrape the value for this field from the selector."""
    # Apply the configured XPath or CSS expression to the selector.
    if self.xpath:
        selected = selector.xpath(self.selection)
    else:
        selected = selector.css(self.selection)
    # Extract the value, applying the regular expression if specified.
    if self.re:
        value = selected.re(self.re)
    else:
        value = selected.extract(raw=self.raw, cleaner=cleaner)
    return self._post_scrape(value, processor=processor)
|
def _gatherUpdatedJobs(self, updatedJobTuple):
    """Gather any new, updated jobGraph from the batch system."""
    jobID, result, wallTime = updatedJobTuple
    try:
        updatedJob = self.jobBatchSystemIDToIssuedJob[jobID]
    except KeyError:
        # Unknown job id: its result was most likely handled already.
        logger.warn("A result seems to already have been processed "
                    "for job %s", jobID)
        return
    if result == 0:
        # Internal CWL bookkeeping jobs are logged at debug level only.
        if str(updatedJob.jobName).startswith(CWL_INTERNAL_JOBS):
            cur_logger = logger.debug
        else:
            cur_logger = logger.info
        cur_logger('Job ended successfully: %s', updatedJob)
        if self.toilMetrics:
            self.toilMetrics.logCompletedJob(updatedJob)
    else:
        logger.warn('Job failed with exit value %i: %s', result, updatedJob)
    self.processFinishedJob(jobID, result, wallTime=wallTime)
|
def unsubscribe(self, message, handler):
    '''Removes handler from message listeners.

    :param str message:
        Name of message to unsubscribe handler from.
    :param callable handler:
        Callable that should be removed as handler for `message`.
    '''
    wrapped = WeakCallable(handler)
    with self._lock:
        self._subscribers[message].remove(wrapped)
|
def all_finite(self, X):
    """returns true if X is finite, false, otherwise"""
    # Adapted from sklearn utils: _assert_all_finite(X).
    # Non-float arrays (ints, bools, ...) are always finite.
    if X.dtype.char not in np.typecodes['AllFloat']:
        return True
    # O(n)-time, O(1)-space fast path: a finite sum means finite data.
    # The float32 cast mirrors sklearn tree.py, which works in float32
    # internally, so float64 values finite in float64 may not be in float32.
    as_f32 = np.asarray(X, dtype='float32')
    if np.isfinite(as_f32.sum()):
        return True
    # Fall back to the O(n)-space elementwise check to avoid false
    # positives from overflow in the sum.
    return bool(np.isfinite(as_f32).all())
|
def construct_getatt(node):
    """Reconstruct !GetAtt into a list"""
    value = node.value
    if isinstance(value, six.text_type):
        # "Resource.Attribute" string form: split on the first dot only.
        return value.split(".", 1)
    if isinstance(value, list):
        # Already a sequence of scalar nodes.
        return [item.value for item in value]
    raise ValueError("Unexpected node type: {}".format(type(value)))
|
def get_request_token(self):
    """Return the request token for this API, fetching and memoizing it
    on first use."""
    token = self._request_token
    if token is None:
        token = self._get_request_token()
        self._request_token = token
    return token
|
def NewFromHtml(html, alpha=1.0, wref=_DEFAULT_WREF):
    '''Create a new instance based on the specified HTML color definition.

    Parameters:
      :html:
        The HTML definition of the color (#RRGGBB or #RGB or a color name).
      :alpha:
        The color transparency [0...1], default is opaque.
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> str(Color.NewFromHtml('#ff8000'))
    '(1, 0.501961, 0, 1)'
    >>> str(Color.NewFromHtml('ff8000'))
    '(1, 0.501961, 0, 1)'
    >>> str(Color.NewFromHtml('#f60'))
    '(1, 0.4, 0, 1)'
    >>> str(Color.NewFromHtml('f60'))
    '(1, 0.4, 0, 1)'
    >>> str(Color.NewFromHtml('lemonchiffon'))
    '(1, 0.980392, 0.803922, 1)'
    >>> str(Color.NewFromHtml('#ff8000', 0.5))
    '(1, 0.501961, 0, 0.5)'
    '''
    # Delegate parsing to HtmlToRgb and build the Color in 'rgb' mode.
    return Color(Color.HtmlToRgb(html), 'rgb', alpha, wref)
|
def open_remote_url(urls, **kwargs):
    """Open the url and check that it stores a file.

    Args:
        :urls: Endpoint to take the file
    """
    if isinstance(urls, str):
        urls = [urls]
    for url in urls:
        try:
            response = requests.get(url, stream=True, **kwargs)
            # An HTML content type means we got a web page, not a file.
            if 'html' in response.headers['content-type']:
                raise ValueError("HTML source file retrieved.")
            return response
        except Exception as ex:
            # Log and fall through to the next candidate URL.
            logger.error('Fail to open remote url - {}'.format(ex))
|
async def _connect_sentinel(self, address, timeout, pools):
    """Try to connect to specified Sentinel returning either
    connections pool or exception.
    """
    try:
        with async_timeout(timeout, loop=self._loop):
            pool = await create_pool(
                address, minsize=1, maxsize=2,
                parser=self._parser_class, loop=self._loop)
        pools.append(pool)
        return pool
    except asyncio.TimeoutError as err:
        # Errors are returned rather than raised so the caller can try
        # the remaining sentinels and inspect all failures afterwards.
        sentinel_logger.debug(
            "Failed to connect to Sentinel(%r) within %ss timeout",
            address, timeout)
        return err
    except Exception as err:
        sentinel_logger.debug(
            "Error connecting to Sentinel(%r): %r", address, err)
        return err
|
def show(self, viewer=None, **kwargs):
    """Display the current scene.

    Parameters
    ----------
    viewer : str or None
        'gl' opens a pyglet window, 'notebook' returns
        IPython.display.HTML, and None picks automatically based on
        whether we are running inside an IPython notebook.
    smooth : bool
        Turn on or off automatic smooth shading.
    """
    if viewer is None:
        # Check whether we are in a notebook and pick accordingly.
        from ..viewer import in_notebook
        viewer = 'notebook' if in_notebook() else 'gl'
    if viewer == 'gl':
        # Imports pyglet; raises ImportError if pyglet is unavailable.
        from ..viewer import SceneViewer
        return SceneViewer(self, **kwargs)
    if viewer == 'notebook':
        from ..viewer import scene_to_notebook
        return scene_to_notebook(self, **kwargs)
    raise ValueError('viewer must be "gl", "notebook", or None')
|
def getWorkingCollisionBoundsInfo(self):
    """Returns the number of Quads if the buffer points to null. Otherwise it
    returns Quads into the buffer up to the max specified from the working copy.
    """
    fn = self.function_table.getWorkingCollisionBoundsInfo
    quads_buffer = HmdQuad_t()
    quads_count = c_uint32()
    # The C API fills both out-parameters through the byref pointers.
    result = fn(byref(quads_buffer), byref(quads_count))
    return result, quads_buffer, quads_count.value
|
def setStyle(self, styleName, styleValue):
    '''setStyle - Sets a style param. Example: "display", "block"

    If you need to set many styles on an element, use setStyles instead.
    It takes a dictionary of attribute, value pairs and applies it all
    in one go (faster).

    To remove a style, set its value to empty string.
    When all styles are removed, the "style" attribute will be nullified.

    @param styleName - The name of the style element
    @param styleValue - The value of which to assign the style element

    @return - String of current value of "style" after change is made.
    '''
    # NOTE(review): despite the docstring, there is no return statement,
    # so this function returns None -- confirm the intended contract.
    myAttributes = self._attributes
    if 'style' not in myAttributes:
        # No style yet: store the initial "name: value" string.
        myAttributes['style'] = "%s: %s" % (styleName, styleValue)
    else:
        # NOTE(review): setattr here assumes the stored style value is a
        # style object exposing per-property attributes, not the plain
        # string assigned by the branch above -- presumably the attribute
        # container converts it; verify.
        setattr(myAttributes['style'], styleName, styleValue)
|
def target_internal_dependencies(target):
    """Returns internal Jarable dependencies that were "directly" declared.

    Directly declared deps are those that are explicitly listed in the
    definition of a target, rather than being depended on transitively. But in
    order to walk through aggregator targets such as `target`, `dependencies`,
    or `jar_library`, this recursively descends the dep graph and stops at
    Jarable instances.
    """
    for dependency in target.dependencies:
        if isinstance(dependency, Jarable):
            yield dependency
        else:
            # Aggregator target: recurse until Jarables are reached.
            for nested in target_internal_dependencies(dependency):
                yield nested
|
def unescape_single_character(match):
    """Unescape a single escape sequence found from a MySQL string literal,
    according to the rules defined at:
    https://dev.mysql.com/doc/refman/5.6/en/string-literals.html#character-escape-sequences

    :param match: Regular expression match object.
    :return: Unescaped version of given escape sequence.
    :rtype: str
    """
    sequence = match.group(0)
    assert sequence.startswith("\\")
    mapped = MYSQL_STRING_ESCAPE_SEQUENCE_MAPPING.get(sequence)
    # Sequences without a special mapping unescape to the character that
    # follows the backslash.
    return mapped or sequence[1:]
|
def set_default_region(self, region):
    """This sets the default region for detecting license plates. For
    example, setting region to "md" for Maryland or "fr" for France.

    :param region: A unicode/ascii string (Python 2/3) or bytes array (Python 3)
    :return: None
    """
    # Convert to a C char pointer before handing off to the native library.
    encoded_region = _convert_to_charp(region)
    self._set_default_region_func(self.alpr_pointer, encoded_region)
|
def prepare_destruction(self):
    """Get rid of circular references."""
    self._tool = None
    self._painter = None
    # Detach from the selection model before dropping the reference.
    self.relieve_model(self._selection)
    self._selection = None
    # Clear observer class attributes, also see ExtendenController.destroy()
    for observer_attr in ('_Observer__PROP_TO_METHS',
                          '_Observer__METH_TO_PROPS',
                          '_Observer__PAT_TO_METHS',
                          '_Observer__METH_TO_PAT',
                          '_Observer__PAT_METH_TO_KWARGS'):
        getattr(self, observer_attr).clear()
|
def draw(self):
    """Draws the image at the given location."""
    # Hidden sprites are simply not blitted.
    if self.visible:
        self.window.blit(self.image, self.loc)
|
def _maxscore(self):
    """m._maxscore() -- Sets self.maxscore and self.minscore"""
    # Best possible score sums the max at each position; worst sums the min.
    self.maxscore = sum(max(position.values()) for position in self.ll)
    self.minscore = sum(min(position.values()) for position in self.ll)
|
def save_model():
    """Save cnn model.

    Returns
    -------
    callback : A callback function that can be passed as epoch_end_callback to fit
    """
    # makedirs(exist_ok=True) avoids the check-then-create race of the
    # previous os.path.exists()/os.mkdir() pair.
    os.makedirs("checkpoint", exist_ok=True)
    return mx.callback.do_checkpoint("checkpoint/checkpoint", args.save_period)
|
def cells(self):
    '''Returns an iterator of all cells in the table.'''
    # Flatten cells line by line.
    for line in self.text.splitlines():
        yield from self.getcells(line)
|
def universal_extract_paragraphs(xml):
    """Extract paragraphs from xml that could be from different sources.

    Elsevier XML is tried first: non-Elsevier input makes the Elsevier
    parser raise, whereas the PMC client may silently mis-process
    Elsevier XML instead of raising.

    Parameters
    ----------
    xml : str
        Either an NLM xml, Elsevier xml or plaintext.

    Returns
    -------
    paragraphs : str
        Extracted plaintext paragraphs from NLM or Elsevier XML, or the
        raw input wrapped in a list as a last resort.
    """
    paragraphs = None
    try:
        paragraphs = elsevier_client.extract_paragraphs(xml)
    except Exception:
        pass
    if paragraphs is not None:
        return paragraphs
    try:
        return pmc_client.extract_paragraphs(xml)
    except Exception:
        # Neither parser succeeded; fall back to treating the input as plaintext.
        return [xml]
|
def on_backward_end(self, **kwargs: Any) -> None:
    "Convert the gradients back to FP32 and divide them by the scale."
    if self.dynamic and grad_overflow(self.model_params) and self.loss_scale > 1:
        # Gradient overflow detected under dynamic loss scaling: halve the
        # scale and reset the no-skip counter.
        self.loss_scale /= 2
        self.noskip = 0
        # The step will be skipped since we don't update the master grads so they are all None or zero
    else:
        # Copy the FP16 model gradients into the FP32 master parameters,
        # then undo the loss scaling on the master gradients.
        model_g2master_g(self.model_params, self.master_params, self.flat_master)
        for group in self.master_params:
            for param in group:
                if param.grad is not None:
                    param.grad.div_(self.loss_scale)
        # Optional gradient clipping on the FP32 master parameters.
        if self.clip is not None:
            for group in self.master_params:
                nn.utils.clip_grad_norm_(group, self.clip)
        if not self.dynamic:
            return
        # Dynamic loss scaling: after `max_noskip` consecutive overflow-free
        # steps, double the scale (capped at `max_scale`).
        self.noskip += 1
        if self.noskip >= self.max_noskip and self.loss_scale < self.max_scale:
            self.loss_scale *= 2
            self.noskip = 0
|
def upload_plugin(self, plugin_path):
    """Provide plugin path for upload into Jira e.g. useful for auto deploy

    :param plugin_path: path to the plugin file on disk
    :return: response of the POST against the UPM endpoint
    """
    headers = {'X-Atlassian-Token': 'nocheck'}
    # Fetch the UPM token before touching the file, so a failed token
    # request does not leave an open file handle behind.
    upm_token = self.request(method='GET', path='rest/plugins/1.0/', headers=headers, trailing=True).headers['upm-token']
    url = 'rest/plugins/1.0/?token={upm_token}'.format(upm_token=upm_token)
    # Close the file handle deterministically (the original leaked it).
    with open(plugin_path, 'rb') as plugin_file:
        files = {'plugin': plugin_file}
        return self.post(url, files=files, headers=headers)
|
def UV_B(self):
    """returns UV = all respected U -> Ux in ternary coding (1 = V, 2 = U)"""
    # AND together the bit-masks of all members, then delegate to the
    # module-level UV_B helper for the ternary encoding.
    combined = reduce(lambda acc, mask: acc & mask,
                      (B(item, self.width - 1) for item in self))
    return UV_B(combined, self.width)
|
def get_elections(self, obj):
    """All elections on an election day."""
    day = ElectionDay.objects.get(date=self.context['election_date'])
    qs = Election.objects.filter(race__office=obj, election_day=day)
    return ElectionSerializer(qs, many=True).data
|
def struct(self):
    """XML-RPC-friendly representation of the current object state"""
    output = {}
    for attr_name, field_map in self._def.items():
        # Attributes that were never set on this instance are skipped.
        if not hasattr(self, attr_name):
            continue
        output.update(field_map.get_outputs(getattr(self, attr_name)))
    return output
|
def load(hdf5_file_name, data, minPts, eps=None, quantile=50, subsamples_matrix=None, samples_weights=None, metric='minkowski', p=2, verbose=True):
    """Determine the radius 'eps' for DBSCAN clustering of 'data' in an adaptive,
    data-dependent way, and precompute the 'eps'-neighborhood of every sample
    into the given HDF5 data structure.

    Parameters
    ----------
    hdf5_file_name : file object or string
        The handle or name of an HDF5 data structure where any array needed for DBSCAN
        and too large to fit into memory is to be stored.
    data : array of shape (n_samples, n_features)
        An array of features retained from the data-set to be analysed.
        Subsamples of this curated data-set can also be analysed by a call to DBSCAN by
        providing an appropriate list of selected samples labels in 'subsamples_matrix'.
    minPts : int
        The number of points within an epsilon-radius hypersphere for the said region
        to qualify as dense.
    eps : float, optional (default = None)
        Sets the maximum distance separating two data-points for those data-points to be
        considered as part of the same neighborhood. If None, it is estimated from the
        distribution of minPts-nearest-neighbor distances (see 'quantile').
    quantile : int, optional (default = 50)
        If 'eps' is not provided by the user, it will be determined as the 'quantile' of
        the distribution of the k-nearest distances to each sample, with k set to 'minPts'.
    subsamples_matrix : array of shape (n_runs, n_subsamples), optional (default = None)
        Each row of this matrix contains a set of indices identifying the samples selected
        from the whole data-set for each of 'n_runs' independent rounds of DBSCAN clusterings.
    samples_weights : array of shape (n_runs, n_samples), optional (default = None)
        Holds the weights of each sample. A sample with weight greater than 'minPts' is
        guaranteed to be a core sample; a sample with negative weight tends to prevent its
        'eps'-neighbors from being core. Weights are absolute and default to 1.
    metric : string or callable, optional (default = 'minkowski')
        The metric to use for computing the pairwise distances between samples. If metric
        is a string or callable, it must be compatible with
        metrics.pairwise.pairwise_distances.
    p : float, optional (default = 2)
        If a Minkowski metric is used, 'p' determines its power.
    verbose : bool, optional (default = True)
        Whether to display messages reporting the status of the computations and the time
        it took to complete each major stage of the algorithm.

    Returns
    -------
    eps : float
        The parameter of DBSCAN clustering specifying if points are density-reachable.
        Either a copy of the value provided at input or, if the user did not specify one,
        the value determined from k-distance graphs of the data-set.

    References
    ----------
    Ester, M., H. P. Kriegel, J. Sander and X. Xu, "A Density-Based Algorithm for
    Discovering Clusters in Large Spatial Databases with Noise". In: Proceedings of the
    2nd International Conference on Knowledge Discovery and Data Mining, Portland, OR,
    AAAI Press, pp. 226-231. 1996
    """
    data = np.array(data, copy=False)
    # --- Input validation -------------------------------------------------
    if data.ndim > 2:
        raise ValueError("\nERROR: DBSCAN_multiplex @ load:\n" "the data array is of dimension %d. Please provide a two-dimensional " "array instead.\n" % data.ndim)
    if subsamples_matrix is None:
        # Default: a single run over the whole data-set.
        subsamples_matrix = np.arange(data.shape[0], dtype=int)
        subsamples_matrix = subsamples_matrix.reshape(1, -1)
    else:
        subsamples_matrix = np.array(subsamples_matrix, copy=False)
        if subsamples_matrix.ndim > 2:
            raise ValueError("\nERROR: DBSCAN_multiplex @ load:\n" "the array of subsampled indices is of dimension %d. " "Please provide a two-dimensional array instead.\n" % subsamples_matrix.ndim)
    if (data.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(data.sum()) and not np.all(np.isfinite(data))):
        raise ValueError('\nERROR: DBSCAN_multiplex @ load:\n' 'the data vector contains at least one infinite or NaN entry.\n')
    if (subsamples_matrix.dtype.type is np.int_ and not np.isfinite(subsamples_matrix.sum()) and not np.all(np.isfinite(subsamples_matrix))):
        raise ValueError('\nERROR: DBSCAN_multiplex @ load:\n' 'the array of subsampled indices contains at least one infinite or NaN entry.\n')
    if not np.all(subsamples_matrix >= 0):
        raise ValueError('\nERROR: DBSCAN_multiplex @ load:\n' 'the sampled indices should all be positive integers.\n')
    N_samples = data.shape[0]
    N_runs, N_subsamples = subsamples_matrix.shape
    if N_subsamples > N_samples:
        raise ValueError('\nERROR: DBSCAN_multiplex @ load:\n' 'the number of sampled indices cannot exceed the total number of samples in the whole data-set.\n')
    for i in range(N_runs):
        # Sort the indices of each run; NOTE(review): np.unique also removes
        # duplicates, which assumes each row already has distinct indices
        # (otherwise the assignment back into the fixed-width row would fail).
        subsamples_matrix[i] = np.unique(subsamples_matrix[i])
    if not isinstance(minPts, int):
        raise TypeError("\nERROR: DBSCAN_multiplex @ load:\n" "the parameter 'minPts' must be an integer.\n")
    if minPts < 2:
        raise ValueError("\nERROR: DBSCAN_multiplex @ load:\n" "the value of 'minPts' must be larger than 1.\n")
    if eps is None:
        # Determine the parameter 'eps' as the chosen percentile of the
        # distribution of the maximum of the minPts-nearest-neighbors
        # distances for each sample.
        if verbose:
            print(("INFO: DBSCAN_multiplex @ load:\n" "starting the determination of an appropriate value of 'eps' for this data-set" " and for the other parameter of the DBSCAN algorithm set to {minPts}.\n" "This might take a while." .format(**locals())))
        beg_eps = time.time()
        quantile = np.rint(quantile)
        quantile = np.clip(quantile, 0, 100)
        # Flat array holding minPts consecutive neighbor distances per sample.
        k_distances = kneighbors_graph(data, minPts, mode='distance', metric=metric, p=p).data
        radii = np.zeros(N_samples, dtype=float)
        for i in range(0, minPts):
            # Stride over 'k_distances' to take the element-wise maximum
            # across each sample's minPts nearest-neighbor distances.
            radii = np.maximum(radii, k_distances[i::minPts])
        if quantile == 50:
            eps = round(np.median(radii, overwrite_input=True), 4)
        else:
            eps = round(np.percentile(radii, quantile), 4)
        end_eps = time.time()
        if verbose:
            print(("\nINFO: DBSCAN_multiplex @ load:\n" "done with evaluating parameter 'eps' from the data-set provided." " This took {} seconds. Value of epsilon: {}." .format(round(end_eps - beg_eps, 4), eps)))
    else:
        if not (isinstance(eps, float) or isinstance(eps, int)):
            raise ValueError("\nERROR: DBSCAN_multiplex @ load:\n" "please provide a numeric value for the radius 'eps'.\n")
        if not eps > 0.0:
            raise ValueError("\nERROR: DBSCAN_multiplex @ load:\n" "the radius 'eps' must be positive.\n")
        eps = round(eps, 4)
    # For all samples with a large enough neighborhood, 'neighborhoods_indices'
    # and 'neighborhoods_indptr' help us find the neighbors to every sample. Note
    # that this definition of neighbors leaves the original point in,
    # which will be considered later.
    if verbose:
        print(("\nINFO: DBSCAN_multiplex @ load:\n" "identifying the neighbors within an hypersphere of radius {eps} around each sample," " while at the same time evaluating the number of epsilon-neighbors for each sample.\n" "This might take a fair amount of time." .format(**locals())))
    beg_neigh = time.time()
    fileh = tables.open_file(hdf5_file_name, mode='r+')
    DBSCAN_group = fileh.create_group(fileh.root, 'DBSCAN_group')
    # Extendable array of column indices of the (CSR-like) sparse matrix of
    # 'eps'-neighborhoods; stored on disk because it may not fit in memory.
    neighborhoods_indices = fileh.create_earray(DBSCAN_group, 'neighborhoods_indices', tables.Int32Atom(), (0,), 'Indices array for sparse matrix of neighborhoods', expectedrows=int((N_samples ** 2) / 50))
    # 'neighborhoods_indptr' is such that for each of row i of the data-matrix
    # neighborhoods_indices[neighborhoods_indptr[i]:neighborhoods_indptr[i + 1]]
    # contains the column indices of row i from the array of
    # 'eps'-neighborhoods.
    neighborhoods_indptr = np.zeros(1, dtype=np.int64)
    # For each sample, 'neighbors_counts' will keep a tally of the number
    # of its neighbors within a hypersphere of radius 'eps'.
    # Note that the sample itself is counted as part of this neighborhood.
    neighbors_counts = fileh.create_carray(DBSCAN_group, 'neighbors_counts', tables.Int32Atom(), (N_runs, N_samples), 'Array of the number of neighbors around each sample of a set of subsampled points', filters=None)
    # Process the pairwise-distance matrix in row-chunks to bound memory usage.
    chunks_size = get_chunk_size(N_samples, 3)
    for i in range(0, N_samples, chunks_size):
        chunk = data[i:min(i + chunks_size, N_samples)]
        D = pairwise_distances(chunk, data, metric=metric, p=p, n_jobs=1)
        # Boolean adjacency: True where the two samples are 'eps'-neighbors.
        D = (D <= eps)
        if samples_weights is None:
            for run in range(N_runs):
                x = subsamples_matrix[run]
                # Restrict columns to the samples of this run.
                M = np.take(D, x, axis=1)
                # Rows of this chunk that also belong to this run's subsample.
                legit_rows = np.intersect1d(i + np.arange(min(chunks_size, N_samples - i)), x, assume_unique=True)
                M = np.take(M, legit_rows - i, axis=0)
                neighbors_counts[run, legit_rows] = M.sum(axis=1)
                del M
        else:
            for run in range(N_runs):
                x = subsamples_matrix[run]
                M = np.take(D, x, axis=1)
                legit_rows = np.intersect1d(i + np.arange(min(chunks_size, N_samples - i)), x, assume_unique=True)
                M = np.take(M, legit_rows - i, axis=0)
                # Weighted count: sum the weights of the neighbors instead of
                # simply counting them.
                neighbors_counts[run, legit_rows] = np.array([np.sum(samples_weights[x[row]]) for row in M])
                del M
        # Append the column indices of this chunk's neighbors and extend the
        # cumulative row-pointer array accordingly.
        candidates = np.where(D == True)
        del D
        neighborhoods_indices.append(candidates[1])
        _, nbr = np.unique(candidates[0], return_counts=True)
        counts = np.cumsum(nbr) + neighborhoods_indptr[-1]
        del candidates
        neighborhoods_indptr = np.append(neighborhoods_indptr, counts)
    fileh.create_carray(DBSCAN_group, 'neighborhoods_indptr', tables.Int64Atom(), (N_samples + 1,), 'Array of cumulative number of column indices for each row', filters=None)
    fileh.root.DBSCAN_group.neighborhoods_indptr[:] = neighborhoods_indptr[:]
    fileh.create_carray(DBSCAN_group, 'subsamples_matrix', tables.Int32Atom(), (N_runs, N_subsamples), 'Array of subsamples indices', filters=None)
    fileh.root.DBSCAN_group.subsamples_matrix[:] = subsamples_matrix[:]
    fileh.close()
    end_neigh = time.time()
    if verbose:
        print(("\nINFO: DBSCAN_multiplex @ load:\n" "done with the neighborhoods. This step took {} seconds." .format(round(end_neigh - beg_neigh, 4))))
    # Free the large temporaries before returning.
    gc.collect()
    return eps
|
def get_group(self, uuid=None):
    """Get group data based on uuid.

    Args:
        uuid (str): optional uuid. defaults to self.uuid

    Raises:
        PyLmodUnexpectedData: No data was returned.
        requests.RequestException: Exception connection error

    Returns:
        dict: group json
    """
    # Fall back to the uuid stored on this client instance.
    # (The docstring previously claimed self.cuuid; the code reads self.uuid.)
    if uuid is None:
        uuid = self.uuid
    return self.get('group', params={'uuid': uuid})
|
def contains(bank, key):
    '''Checks if the specified bank contains the specified key.'''
    # Any key could be a branch and a leaf at the same time in Consul,
    # so a bank with no key is always considered present.
    if key is None:
        return True
    try:
        c_key = '{0}/{1}'.format(bank, key)
        _, value = api.kv.get(c_key)
    except Exception as exc:
        raise SaltCacheError('There was an error getting the key, {0}: {1}'.format(c_key, exc))
    return value is not None
|
def get(path):
    '''Get the content of the docker-compose file into a directory

    path
        Path where the docker-compose file is stored on the server

    CLI Example:

    .. code-block:: bash

        salt myminion dockercompose.get /path/where/docker-compose/stored
    '''
    file_path = __get_docker_file_path(path)
    if file_path is None:
        return __standardize_result(False, 'Path {} is not present'.format(path), None, None)
    result = __read_docker_compose_file(file_path)
    if not result['status']:
        return result
    # __load_project returns a dict only on failure, i.e. the file is invalid.
    project = __load_project(path)
    result['return']['valid'] = not isinstance(project, dict)
    return result
|
def extensions():
    """How do we handle cython:
    1. when on git, require cython during setup time (do not distribute
       generated .c files via git)
       a) cython present -> fine
       b) no cython present -> install it on the fly. Extensions have to have .pyx suffix
       This is solved via a lazy evaluation of the extension list. This is needed,
       because build_ext is being called before cython will be available.
       https://bitbucket.org/pypa/setuptools/issue/288/cannot-specify-cython-under-setup_requires
    2. src dist install (have pre-converted c files and pyx files)
       a) cython present -> fine
       b) no cython -> use .c files
    """
    # Detect whether Cython is importable; fall back to pre-generated .c files.
    USE_CYTHON = False
    try:
        from Cython.Build import cythonize
        USE_CYTHON = True
    except ImportError:
        warnings.warn('Cython not found. Using pre cythonized files.')
    import mdtraj
    # Note, that we add numpy include to every extension after declaration.
    from numpy import get_include as _np_inc
    np_inc = _np_inc()
    pybind_inc = get_pybind_include()
    # NOTE(review): the 'lib' prefix is applied on Windows only, which looks
    # inverted relative to the usual Unix linker convention -- confirm against
    # the library names mdtraj actually ships per platform.
    lib_prefix = 'lib' if sys.platform.startswith('win') else ''
    common_cflags = ['-O3', ]
    # C++/pybind11 extensions.
    clustering_module = Extension('pyemma.coordinates.clustering._ext', sources=['pyemma/coordinates/clustering/src/clustering_module.cpp'], include_dirs=[mdtraj.capi()['include_dir'], pybind_inc, 'pyemma/coordinates/clustering/include', ], language='c++', libraries=[lib_prefix + 'theobald'], library_dirs=[mdtraj.capi()['lib_dir']], extra_compile_args=common_cflags)
    covar_module = Extension('pyemma._ext.variational.estimators.covar_c._covartools', sources=['pyemma/_ext/variational/estimators/covar_c/covartools.cpp'], include_dirs=['pyemma/_ext/variational/estimators/covar_c/', pybind_inc, ], language='c++', extra_compile_args=common_cflags)
    # Cython/C extensions.
    eig_qr_module = Extension('pyemma._ext.variational.solvers.eig_qr.eig_qr', sources=['pyemma/_ext/variational/solvers/eig_qr/eig_qr.pyx'], include_dirs=['pyemma/_ext/variational/solvers/eig_qr/', np_inc], extra_compile_args=['-std=c99'] + common_cflags)
    orderedset = Extension('pyemma._ext.orderedset._orderedset', sources=['pyemma/_ext/orderedset/_orderedset.pyx'], extra_compile_args=['-std=c99'] + common_cflags)
    # Thermodynamics (thermo) extensions; the TRAMMBAR variants reuse the TRAM
    # sources compiled with the -DTRAMMBAR flag.
    extra_compile_args = ["-O3", "-std=c99"]
    ext_bar = Extension("pyemma.thermo.extensions.bar", sources=["pyemma/thermo/extensions/bar/bar.pyx", "pyemma/thermo/extensions/bar/_bar.c", "pyemma/thermo/extensions/util/_util.c"], extra_compile_args=extra_compile_args)
    ext_wham = Extension("pyemma.thermo.extensions.wham", sources=["pyemma/thermo/extensions/wham/wham.pyx", "pyemma/thermo/extensions/wham/_wham.c", "pyemma/thermo/extensions/util/_util.c"], extra_compile_args=extra_compile_args)
    ext_mbar = Extension("pyemma.thermo.extensions.mbar", sources=["pyemma/thermo/extensions/mbar/mbar.pyx", "pyemma/thermo/extensions/mbar/_mbar.c", "pyemma/thermo/extensions/util/_util.c"], extra_compile_args=extra_compile_args)
    ext_tram = Extension("pyemma.thermo.extensions.tram", sources=["pyemma/thermo/extensions/tram/tram.pyx", "pyemma/thermo/extensions/tram/_tram.c", "pyemma/thermo/extensions/util/_util.c"], extra_compile_args=extra_compile_args)
    ext_dtram = Extension("pyemma.thermo.extensions.dtram", sources=["pyemma/thermo/extensions/dtram/dtram.pyx", "pyemma/thermo/extensions/dtram/_dtram.c", "pyemma/thermo/extensions/util/_util.c"], extra_compile_args=extra_compile_args)
    ext_trammbar = Extension("pyemma.thermo.extensions.trammbar", sources=["pyemma/thermo/extensions/trammbar/trammbar.pyx", "pyemma/thermo/extensions/tram/_tram.c", "pyemma/thermo/extensions/util/_util.c"], extra_compile_args=extra_compile_args + ["-DTRAMMBAR"])
    ext_mbar_direct = Extension("pyemma.thermo.extensions.mbar_direct", sources=["pyemma/thermo/extensions/mbar_direct/mbar_direct.pyx", "pyemma/thermo/extensions/mbar_direct/_mbar_direct.c", "pyemma/thermo/extensions/util/_util.c"], extra_compile_args=extra_compile_args)
    ext_tram_direct = Extension("pyemma.thermo.extensions.tram_direct", sources=["pyemma/thermo/extensions/tram_direct/tram_direct.pyx", "pyemma/thermo/extensions/tram_direct/_tram_direct.c", "pyemma/thermo/extensions/util/_util.c"], extra_compile_args=extra_compile_args)
    ext_trammbar_direct = Extension("pyemma.thermo.extensions.trammbar_direct", sources=["pyemma/thermo/extensions/trammbar_direct/trammbar_direct.pyx", "pyemma/thermo/extensions/tram_direct/_tram_direct.c", "pyemma/thermo/extensions/util/_util.c"], extra_compile_args=extra_compile_args + ["-DTRAMMBAR"])
    ext_util = Extension("pyemma.thermo.extensions.util", sources=["pyemma/thermo/extensions/util/util.pyx", "pyemma/thermo/extensions/util/_util.c"], extra_compile_args=extra_compile_args)
    exts_thermo = [ext_bar, ext_wham, ext_mbar, ext_tram, ext_dtram, ext_trammbar, ext_mbar_direct, ext_tram_direct, ext_trammbar_direct, ext_util]
    exts = [clustering_module, covar_module, eig_qr_module, orderedset]
    exts += exts_thermo
    # Every extension gets the numpy headers.
    for e in exts:
        e.include_dirs.append(np_inc)
    if not USE_CYTHON:
        # replace pyx files by their pre generated c code.
        for e in exts:
            new_src = []
            for s in e.sources:
                new_src.append(s.replace('.pyx', '.c'))
            e.sources = new_src
    else:
        exts = cythonize(exts, language_level=sys.version_info[0])
    return exts
|
def _get_config_file_path(xdg_config_dir, xdg_config_file):
    """Search ``XDG_CONFIG_DIRS`` for a config file and return the first found.

    Search each of the standard XDG configuration directories for a
    configuration file. Return as soon as a configuration file is found. Beware
    that by the time client code attempts to open the file, it may be gone or
    otherwise inaccessible.

    :param xdg_config_dir: A string. The name of the directory that is suffixed
        to the end of each of the ``XDG_CONFIG_DIRS`` paths.
    :param xdg_config_file: A string. The name of the configuration file that
        is being searched for.
    :returns: A ``str`` path to a configuration file.
    :raises nailgun.config.ConfigFileError: When no configuration file can be
        found.
    """
    candidates = (join(config_dir, xdg_config_file)
                  for config_dir in BaseDirectory.load_config_paths(xdg_config_dir))
    for candidate in candidates:
        if isfile(candidate):
            return candidate
    raise ConfigFileError('No configuration files could be located after searching for a file ' 'named "{0}" in the standard XDG configuration paths, such as ' '"~/.config/{1}/".'.format(xdg_config_file, xdg_config_dir))
|
def download_file_job(entry, directory, checksums, filetype='genbank', symlink_path=None):
    """Generate a DownloadJob that actually triggers a file download."""
    pattern = NgdConfig.get_fileending(filetype)
    filename, expected_checksum = get_name_and_checksum(checksums, pattern)
    full_url = '{}/{}'.format(convert_ftp_url(entry['ftp_path']), filename)
    local_file = os.path.join(directory, filename)
    full_symlink = os.path.join(symlink_path, filename) if symlink_path is not None else None
    # Keep metadata around
    metadata.get().add(entry, local_file)
    return DownloadJob(full_url, local_file, expected_checksum, full_symlink)
|
def _get_masses(fitnesses):
    """Convert fitnesses into masses, as given by GSA algorithm."""
    worst_fitness = min(fitnesses)
    fitness_range = max(fitnesses) - worst_fitness
    # Epsilon guards against division by zero when all fitnesses are equal,
    # and keeps every raw mass strictly positive.
    raw_masses = [(fitness - worst_fitness) / (fitness_range + EPSILON) + EPSILON
                  for fitness in fitnesses]
    # Normalize so the masses sum to 1.
    total_mass = sum(raw_masses)
    return [mass / total_mass for mass in raw_masses]
|
def get_command(self, pure=False):
    """Get command from message

    :param pure: strip the leading '/' and any '@botname' suffix
    :return: the command string (or a falsy value when there is none)
    """
    result = self.get_full_command()
    if result:
        result = result[0]
    if pure:
        # Drop the leading '/' and anything after an '@' (bot mention).
        result = result[1:].split('@', 1)[0]
    return result
|
def setup(addr, user, remote_path, local_key=None):
    """Setup the tunnel"""
    port = find_port(addr, user)
    if not port or not is_alive(addr, user):
        # No live tunnel yet: pick a fresh port, ship the helper script over,
        # and start a background ssh forwarder with a control master socket.
        port = new_port()
        scp(addr, user, __file__, '~/unixpipe', local_key)
        ssh_call = ['ssh']
        if local_key:
            ssh_call += ['-i', local_key]
        ssh_call += ['-fL%d:127.0.0.1:12042' % port, '-o', 'ExitOnForwardFailure=yes', '-o', 'ControlPath=~/.ssh/unixpipe_%%r@%%h_%d' % port, '-o', 'ControlMaster=auto', '%s@%s' % (user, addr,), 'python', '~/unixpipe', 'server', remote_path]
        subprocess.call(ssh_call)
        # XXX Sleep is a bad way to wait for the tunnel endpoint
        time.sleep(1)
    return port
|
def autoLayoutSelected(self, padX=None, padY=None, direction=Qt.Horizontal, layout='Layered', animate=0, centerOn=None, center=None):
    """Automatically lays out all the selected nodes in the scene using the
    autoLayoutNodes method.

    :param padX: <int> or None -- default is 2 * cell width
    :param padY: <int> or None -- default is 2 * cell height
    :param direction: <Qt.Direction>
    :param layout: <str> -- name of the layout plugin to use
    :param animate: <int> -- number of seconds to animate over
    :return: {<XNode>: <QRectF>, ..} -- new rects per node
    """
    selected = self.selectedNodes()
    return self.autoLayoutNodes(selected, padX, padY, direction, layout, animate, centerOn, center)
|
def is_compatible(self):
    '''Return True if the wheel is compatible with the current platform.'''
    supported = pep425tags.get_supported()
    return any(tag in supported for tag in self.tags())
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.