signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def countDirectlyConnected(port: LPort, result: dict) -> int:
    """Count how many ports are directly connected to other nodes.

    Recurses into composite ports; for each leaf port connected by exactly
    one edge endpoint to a same-named peer, records the (port, edge) pair in
    ``result`` keyed by the peer's parent when that parent is not an LNode.

    :param port: the LPort to inspect (recurses into ``port.children``)
    :param result: accumulator dict, ``{parent: [(port, edge), ...]}``
    :return: cumulative sum of port counts
    """
    inEdges = port.incomingEdges
    outEdges = port.outgoingEdges
    if port.children:
        # composite port: sum the counts of all children
        ch_cnt = 0
        for ch in port.children:
            ch_cnt += countDirectlyConnected(ch, result)
        return ch_cnt
    elif not inEdges and not outEdges:
        # this port is not connected, just check if it is in the expected state
        if port.direction == PortType.INPUT:
            if port.originObj is not None:
                assert not port.originObj.src.drivers, port.originObj
            else:
                print("Warning", port, "not connected")
        return 0
    else:
        # count edge endpoints; only a single point-to-point connection
        # qualifies as "directly connected"
        connectedElemCnt = 0
        for e in inEdges:
            connectedElemCnt += len(e.srcs)
            if connectedElemCnt > 1:
                return 0
        for e in outEdges:
            connectedElemCnt += len(e.dsts)
            if connectedElemCnt > 1:
                return 0
        if connectedElemCnt != 1:
            return 0
        if inEdges:
            e = inEdges[0]
        else:
            e = outEdges[0]
        # if it is connected to a differently-named port, it does not count
        if e.srcs[0].name != e.dsts[0].name:
            return 0
        if e.srcs[0] is port:
            p = e.dsts[0].parent
        else:
            # (can be a hyperedge, and then e.dsts[0] does not have to be port)
            p = e.srcs[0].parent
        # if it is part of an interface which can be reduced, record it
        if not isinstance(p, LNode):
            connections = result.get(p, [])
            connections.append((port, e))
            result[p] = connections
        return 1
|
def rooms(self, sid, namespace=None):
    """Return the rooms a client is in.

    Identical to :func:`socketio.Server.rooms` except that, when the
    ``namespace`` argument is omitted, the namespace associated with the
    class is used.
    """
    target_namespace = namespace or self.namespace
    return self.server.rooms(sid, namespace=target_namespace)
|
def b58decode(v, length):
    """Decode base58 string ``v`` into a byte string of ``length`` bytes.

    Returns ``None`` when ``length`` is given and the decoded result has a
    different size.
    """
    # accumulate the big-endian integer value of the base58 digits
    acc = 0
    for exponent, digit in enumerate(reversed(v)):
        acc += __b58chars.find(digit) * (__b58base ** exponent)
    # convert the integer to minimal big-endian bytes (at least one byte)
    decoded = bytearray()
    while acc >= 256:
        acc, remainder = divmod(acc, 256)
        decoded.append(remainder)
    decoded.append(acc)
    decoded.reverse()
    # each leading zero-digit in the input maps to a leading NUL byte
    pad = 0
    for digit in v:
        if digit != __b58chars[0]:
            break
        pad += 1
    result = b'\x00' * pad + bytes(decoded)
    if length is not None and len(result) != length:
        return None
    return result
|
def submit_job(self, halt_on_error=True):
    """Submit Batch request to ThreatConnect API.

    :param bool halt_on_error: Halt on a submission error.  The global
        ``halt_on_batch_error`` setting, when set, overrides this argument.
    :return: The batch id from the API response, or None when the POST
        failed and the error handler chose not to halt.
    """
    # check global setting for override
    if self.halt_on_batch_error is not None:
        halt_on_error = self.halt_on_batch_error
    try:
        r = self.tcex.session.post('/v2/batch', json=self.settings)
    except Exception as e:
        self.tcex.handle_error(10505, [e], halt_on_error)
        # BUG FIX: if handle_error() does not halt, ``r`` was unbound and
        # the code below raised NameError; bail out explicitly instead.
        return None
    if not r.ok or 'application/json' not in r.headers.get('content-type', ''):
        self.tcex.handle_error(10510, [r.status_code, r.text], halt_on_error)
    data = r.json()
    if data.get('status') != 'Success':
        self.tcex.handle_error(10510, [r.status_code, r.text], halt_on_error)
    self.tcex.log.debug('Batch Submit Data: {}'.format(data))
    return data.get('data', {}).get('batchId')
|
def all_linked_artifacts_exist(self):
    """All of the artifact paths for this resolve point to existing files."""
    if not self.has_resolved_artifacts:
        return False
    return all(os.path.isfile(path) for path in self.resolved_artifact_paths)
|
def _add_links ( self , links , key = "href" , proxy_key = "proxyURL" , endpoints = None ) :
"""Parses and adds block of links"""
|
if endpoints is None :
endpoints = [ "likes" , "replies" , "shares" , "self" , "followers" , "following" , "lists" , "favorites" , "members" ]
if links . get ( "links" ) :
for endpoint in links [ 'links' ] : # It would seem occasionally the links [ " links " ] [ endpoint ] is
# just a string ( what would be the href value ) . I don ' t know
# why , it ' s likely a bug in pump . io but for now we ' ll support
# this too .
if isinstance ( links [ 'links' ] [ endpoint ] , dict ) :
self . _add_link ( endpoint , links [ 'links' ] [ endpoint ] [ "href" ] )
else :
self . _add_link ( endpoint , links [ "links" ] [ endpoint ] )
for endpoint in endpoints :
if links . get ( endpoint , None ) is None :
continue
if "pump_io" in links [ endpoint ] :
self . _add_link ( endpoint , links [ endpoint ] [ "pump_io" ] [ proxy_key ] )
elif "url" in links [ endpoint ] :
self . _add_link ( endpoint , links [ endpoint ] [ "url" ] )
else :
self . _add_link ( endpoint , links [ endpoint ] [ key ] )
return self . links
|
def urls_old(self, protocol=Resource.Protocol.http):
    '''Build a list endpoint and a detail endpoint for every resource
    registered with this router.

    Uses the router name as prefix and the endpoint name the resource was
    registered under to assemble each url pattern, and the
    constructor-passed url method or class for generating urls.
    '''
    routes = []
    for endpoint, resource_class in self._registry.items():
        setattr(resource_class, 'api_name', self.name)
        setattr(resource_class, 'resource_name', endpoint)
        base_path = '/%s/%s/' % (self.name, endpoint)
        # append any nested resources the resource may have
        for nested_route in resource_class.nested_routes(base_path):
            wrapped = resource_class.wrap_handler(nested_route.handler, protocol)
            routes.append(nested_route._replace(handler=wrapped))
        # resource as list
        routes.append(Route(
            path=base_path,
            handler=resource_class.as_list(protocol),
            methods=resource_class.route_methods(),
            name='{}_{}_list'.format(self.name, endpoint).replace('/', '_')))
        # resource as detail
        routes.append(Route(
            path='/%s/%s/%s/' % (self.name, endpoint, resource_class.route_param('pk')),
            handler=resource_class.as_detail(protocol),
            methods=resource_class.route_methods(),
            name='{}_{}_detail'.format(self.name, endpoint).replace('/', '_')))
    return routes
|
def _to_dict ( self ) :
"""Return a json dictionary representing this model ."""
|
_dict = { }
if hasattr ( self , 'field' ) and self . field is not None :
_dict [ 'field' ] = self . field
if hasattr ( self , 'value' ) and self . value is not None :
_dict [ 'value' ] = self . value
return _dict
|
def download_era5_for_gssha(main_directory, start_datetime, end_datetime,
                            leftlon=-180, rightlon=180, toplat=90,
                            bottomlat=-90, precip_only=False):
    """Download ERA5 data for GSSHA.

    .. note:: https://software.ecmwf.int/wiki/display/WEBAPI/Access+ECMWF+Public+Datasets

    Args:
        main_directory (:obj:`str`): Location of the output for the forecast data.
        start_datetime (:obj:`datetime.datetime`): Datetime for download start.
        end_datetime (:obj:`datetime.datetime`): Datetime for download end.
        leftlon (Optional[:obj:`float`]): Left bound for longitude. Default is -180.
        rightlon (Optional[:obj:`float`]): Right bound for longitude. Default is 180.
        toplat (Optional[:obj:`float`]): Top bound for latitude. Default is 90.
        bottomlat (Optional[:obj:`float`]): Bottom bound for latitude. Default is -90.
        precip_only (Optional[bool]): If True, will only download precipitation.

    Example::

        from datetime import datetime
        from gsshapy.grid.era_to_gssha import download_era5_for_gssha

        era5_folder = '/era5'
        download_era5_for_gssha(era5_folder,
                                datetime(2016, 1, 2),
                                datetime(2016, 1, 4),
                                leftlon=-95, rightlon=-75,
                                toplat=35, bottomlat=30)
    """
    # parameter codes: https://software.ecmwf.int/wiki/display/CKB/ERA5_test+data+documentation
    # import here to make sure it is not required to run the rest of the package
    from ecmwfapi import ECMWFDataServer
    server = ECMWFDataServer()
    try:
        mkdir(main_directory)
    except OSError:
        # directory already exists
        pass
    # ECMWF area specification is N/W/S/E
    download_area = "{toplat}/{leftlon}/{bottomlat}/{rightlon}".format(
        toplat=toplat, leftlon=leftlon, bottomlat=bottomlat, rightlon=rightlon)
    download_datetime = start_datetime
    # one netCDF file per day in the requested range
    while download_datetime <= end_datetime:
        download_file = path.join(main_directory, "era5_gssha_{0}.nc".format(download_datetime.strftime("%Y%m%d")))
        download_date = download_datetime.strftime("%Y-%m-%d")
        if not path.exists(download_file) and not precip_only:
            # instantaneous surface analysis fields for the day
            server.retrieve({
                'dataset': "era5_test",
                # 'oper' is the high resolution daily data, as opposed to
                # monthly means, wave, eda edmm, etc.
                'stream': "oper",
                # instantaneous parameters are archived as type Analysis
                # ('an') as opposed to forecast ('fc')
                'type': "an",
                # surface level, as opposed to pressure level (pl) or model level (ml)
                'levtype': "sfc",
                # parameter codes: http://apps.ecmwf.int/codes/grib/param-db
                'param': "2t/2d/sp/10u/10v/tcc",
                # ERA5 native resolution is ~31 km; 0.25 degree lat/lon
                # approximates it
                'grid': "0.25/0.25",
                # ERA5 provides hourly analysis
                'time': "00/to/23/by/1",
                # area: N/W/S/E
                'area': download_area,
                'date': download_date,
                'target': download_file,
                'format': 'netcdf',
            })
        # shared request template for the forecast accumulations
        # (precipitation & downward solar radiation); step/time/date/target
        # are filled in per window below
        era5_request = {
            'dataset': "era5_test",
            'stream': "oper",
            'type': "fc",
            'levtype': "sfc",
            'param': "tp/ssrd",
            'grid': "0.25/0.25",
            'area': download_area,
            'format': 'netcdf',
        }
        prec_download_file = path.join(main_directory, "era5_gssha_{0}_fc.nc".format(download_datetime.strftime("%Y%m%d")))
        loc_download_file0 = path.join(main_directory, "era5_gssha_{0}_0_fc.nc".format(download_datetime.strftime("%Y%m%d")))
        loc_download_file1 = path.join(main_directory, "era5_gssha_{0}_1_fc.nc".format(download_datetime.strftime("%Y%m%d")))
        loc_download_file2 = path.join(main_directory, "era5_gssha_{0}_2_fc.nc".format(download_datetime.strftime("%Y%m%d")))
        if download_datetime <= start_datetime and not path.exists(loc_download_file0):
            # precipitation 0000-0600: from the previous day's 18:00
            # forecast, steps 6-12
            loc_download_date = (download_datetime - timedelta(1)).strftime("%Y-%m-%d")
            era5_request['step'] = "6/to/12/by/1"
            era5_request['time'] = "18"
            era5_request['target'] = loc_download_file0
            era5_request['date'] = loc_download_date
            server.retrieve(era5_request)
        if download_datetime == end_datetime and not path.exists(loc_download_file1):
            # precipitation 0600-1800 for the final day
            loc_download_date = download_datetime.strftime("%Y-%m-%d")
            era5_request['step'] = "1/to/12/by/1"
            era5_request['time'] = "06"
            era5_request['target'] = loc_download_file1
            era5_request['date'] = loc_download_date
            server.retrieve(era5_request)
        if download_datetime == end_datetime and not path.exists(loc_download_file2):
            # precipitation 1800-2300 for the final day
            loc_download_date = download_datetime.strftime("%Y-%m-%d")
            era5_request['step'] = "1/to/5/by/1"
            era5_request['time'] = "18"
            era5_request['target'] = loc_download_file2
            era5_request['date'] = loc_download_date
            server.retrieve(era5_request)
        if download_datetime < end_datetime and not path.exists(prec_download_file):
            # precipitation 0600-0600 (next day)
            era5_request['step'] = "1/to/12/by/1"
            era5_request['time'] = "06/18"
            era5_request['target'] = prec_download_file
            era5_request['date'] = download_date
            server.retrieve(era5_request)
        download_datetime += timedelta(1)
|
def reactions(self, tag, data, reactors):
    '''Render a list of reactor files and return a reaction struct.'''
    log.debug('Compiling reactions for tag %s', tag)
    high = {}
    chunks = []
    try:
        for reactor_file in reactors:
            high.update(self.render_reaction(reactor_file, tag, data))
        if high:
            errors = self.verify_high(high)
            if errors:
                # Return nothing since there was an error
                log.error('Unable to render reactions for event %s due to '
                          'errors (%s) in one or more of the sls files (%s)',
                          tag, errors, reactors)
                return []
            chunks = self.order_chunks(self.compile_high_data(high))
    except Exception as exc:
        # swallow and log: a failed compile yields an empty reaction list
        log.exception('Exception encountered while compiling reactions')
    self.resolve_aliases(chunks)
    return chunks
|
def _update_pdf(population, fitnesses, pdfs, quantile):
    """Find a better pdf, based on fitnesses."""
    # Fitness threshold taken at the requested quantile of fitnesses
    cutoff = _get_quantile_cutoff(fitnesses, quantile)
    # Evaluate every candidate pdf with a stochastic program
    return _best_pdf(pdfs, population, fitnesses, cutoff)
|
def query_single_page(query, lang, pos, retry=50, from_user=False):
    """Returns tweets from the given URL.

    :param query: The query parameter of the query url
    :param lang: The language parameter of the query url
    :param pos: The query url parameter that determines where to start
        looking; ``None`` requests the initial HTML page, any other value
        requests the JSON continuation endpoint
    :param retry: Number of retries if something goes wrong.
    :param from_user: whether the query targets a single user's timeline
        (changes the shape of the returned pagination token)
    :return: The list of tweets, the pos argument for getting the next page.
    """
    url = get_query_url(query, lang, pos, from_user)
    try:
        response = requests.get(url, headers=HEADER)
        if pos is None:
            # first page: plain HTML response
            html = response.text or ''
            json_resp = None
        else:
            # continuation pages: JSON payload carrying the rendered items
            html = ''
            try:
                json_resp = json.loads(response.text)
                html = json_resp['items_html'] or ''
            except ValueError as e:
                logger.exception('Failed to parse JSON "{}" while requesting "{}"'.format(e, url))
        tweets = list(Tweet.from_html(html))
        if not tweets:
            # nothing parsed: retry from the server-provided position
            # (or from the start when there is no JSON response)
            if json_resp:
                pos = json_resp['min_position']
            else:
                pos = None
            if retry > 0:
                return query_single_page(query, lang, pos, retry - 1, from_user)
            else:
                return [], pos
        if json_resp:
            return tweets, urllib.parse.quote(json_resp['min_position'])
        if from_user:
            return tweets, tweets[-1].id
        return tweets, "TWEET-{}-{}".format(tweets[-1].id, tweets[0].id)
    except requests.exceptions.HTTPError as e:
        logger.exception('HTTPError {} while requesting "{}"'.format(e, url))
    except requests.exceptions.ConnectionError as e:
        logger.exception('ConnectionError {} while requesting "{}"'.format(e, url))
    except requests.exceptions.Timeout as e:
        logger.exception('TimeOut {} while requesting "{}"'.format(e, url))
    except json.decoder.JSONDecodeError as e:
        logger.exception('Failed to parse JSON "{}" while requesting "{}".'.format(e, url))
    # a network/parse error fell through to here: retry with the same pos
    # NOTE(review): this retry call does not forward from_user -- confirm
    # whether that is intentional.
    if retry > 0:
        logger.info('Retrying... (Attempts left: {})'.format(retry))
        return query_single_page(query, lang, pos, retry - 1)
    logger.error('Giving up.')
    return [], None
|
def _decode_var ( cls , string ) :
"""Decodes a given string into the appropriate type in Python .
: param str string : The string to decode
: return : The decoded value"""
|
str_match = cls . quoted_string_regex . match ( string )
if str_match :
return string . strip ( "'" if str_match . groups ( ) [ 0 ] else '"' )
# NOTE : " 1 " . isdigit ( ) results in True because they are idiots
elif string . isdigit ( ) and cls . is_digit_regex . match ( string ) is not None :
return int ( string )
elif string . lower ( ) in ( "true" , "false" ) :
return string . lower ( ) == "true"
elif string . lstrip ( "-" ) . isdigit ( ) :
try :
return int ( string )
except ValueError : # case where we mistake something like " - - 0 " as a int
return string
elif "." in string . lstrip ( "-" ) :
try :
return float ( string )
except ValueError : # one off case where we mistake a single " . " as a float
return string
else :
return string
|
def size(self):
    """The size of this parameter, equivalent to ``self.value.size``."""
    # product of all dimensions, 1 for a scalar (empty shape)
    return np.prod(self.shape, dtype=np.int32)
|
def validate_work_spec(cls, work_spec):
    '''Check that ``work_spec`` is valid.

    It must at the very minimum contain a ``name`` and a numeric
    ``min_gb``.

    :raise rejester.exceptions.ProgrammerError: if it isn't valid
    '''
    if 'name' not in work_spec:
        raise ProgrammerError('work_spec lacks "name"')
    # BUG FIX: ``long`` only exists on Python 2 and raised NameError on
    # Python 3; ``int`` covers arbitrary-precision integers there.
    if ('min_gb' not in work_spec
            or not isinstance(work_spec['min_gb'], (float, int))):
        raise ProgrammerError('work_spec["min_gb"] must be a number')
|
def list_datastores_full(kwargs=None, call=None):
    '''List all the datastores for this VMware environment, with extra
    information.

    CLI Example:

    .. code-block:: bash

        salt-cloud -f list_datastores_full my-vmware-config
    '''
    if call != 'function':
        raise SaltCloudSystemExit(
            'The list_datastores_full function must be called with '
            '-f or --function.')
    datastores = salt.utils.vmware.list_datastores_full(_get_si())
    return {'Datastores': datastores}
|
def owner(self, owner):
    """Sets the owner of this OauthTokenReference.

    User name of the owner of the OAuth token within data.world.

    :param owner: The owner of this OauthTokenReference.
    :type: str
    """
    if owner is None:
        raise ValueError("Invalid value for `owner`, must not be `None`")
    length = len(owner)
    if length > 31:
        raise ValueError("Invalid value for `owner`, length must be less than or equal to `31`")
    if length < 3:
        raise ValueError("Invalid value for `owner`, length must be greater than or equal to `3`")
    if re.search('[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]', owner) is None:
        raise ValueError("Invalid value for `owner`, must be a follow pattern or equal to `/[a-z0-9](?:-(?!-)|[a-z0-9])+[a-z0-9]/`")
    self._owner = owner
|
def get_random_condcount(mode):
    """Pick a (condition, counterbalance) pair with the fewest participants.

    HITs can be in one of three states:
    - jobs that are finished
    - jobs that are started but not finished
    - jobs that are never going to finish (user decided not to do it)

    Our count should be based on the first two, so we count any tasks
    finished or any tasks not finished that were started in the last
    cutoff_time minutes, as specified in the cutoff_time variable in the
    config file.

    Returns a tuple: (cond, condition)
    """
    cutofftime = datetime.timedelta(minutes=-CONFIG.getint('Server Parameters', 'cutoff_time'))
    starttime = datetime.datetime.now() + cutofftime
    try:
        # prefer an explicit conditions.json file when one is present
        conditions = json.load(open(os.path.join(app.root_path, 'conditions.json')))
        numconds = len(conditions.keys())
        numcounts = 1
    except IOError as e:
        # otherwise fall back to the config-declared counts
        numconds = CONFIG.getint('Task Parameters', 'num_conds')
        numcounts = CONFIG.getint('Task Parameters', 'num_counters')
    # participants on the current code version and mode that are finished,
    # or unfinished but started within the cutoff window
    participants = Participant.query.\
        filter(Participant.codeversion == CONFIG.get('Task Parameters', 'experiment_code_version')).\
        filter(Participant.mode == mode).\
        filter(or_(Participant.status == COMPLETED,
                   Participant.status == CREDITED,
                   Participant.status == SUBMITTED,
                   Participant.status == BONUSED,
                   Participant.beginhit > starttime)).all()
    # initialise every (cond, counterbalance) cell to zero so that
    # untouched cells still compete for the minimum
    counts = Counter()
    for cond in range(numconds):
        for counter in range(numcounts):
            counts[(cond, counter)] = 0
    for participant in participants:
        condcount = (participant.cond, participant.counterbalance)
        if condcount in counts:
            counts[condcount] += 1
    mincount = min(counts.values())
    # NOTE(review): Counter.iteritems() is Python 2 only -- this module
    # appears to target Python 2; use .items() if ported to Python 3.
    minima = [hsh for hsh, count in counts.iteritems() if count == mincount]
    chosen = choice(minima)
    app.logger.info("given %(a)s chose %(b)s" % {'a': counts, 'b': chosen})
    return chosen
|
def all_conditional_state_variables_read(self, include_loop=True):
    """Return the state variables used in a condition.

    Over-approximates and also returns index accesses.  It won't work if
    the variable is assigned to a temporary variable.
    """
    # results are memoized on the instance, one cache per include_loop flavour
    cache_attr = ('_all_conditional_state_variables_read_with_loop'
                  if include_loop
                  else '_all_conditional_state_variables_read')
    if getattr(self, cache_attr) is None:
        explored = self._explore_functions(
            lambda x: self._explore_func_cond_read(x, include_loop))
        setattr(self, cache_attr, explored)
    return getattr(self, cache_attr)
|
def make_query(self, return_score=False):
    """Return the index of the sample to be queried and labeled and the
    selection score of each sample.  Read-only; no modification to the
    internal states.

    Returns
    -------
    ask_id : int
        The index of the next unlabeled sample to be queried and labeled.
    score : list of (index, score) tuple
        Selection score of unlabeled entries, the larger the better.
        Only returned when ``return_score`` is True.
    """
    dataset = self.dataset
    self.model.train(dataset)
    unlabeled_entry_ids, X_pool = zip(*dataset.get_unlabeled_entries())
    # decision values: class probabilities or real-valued outputs,
    # depending on the model type
    if isinstance(self.model, ProbabilisticModel):
        dvalue = self.model.predict_proba(X_pool)
    elif isinstance(self.model, ContinuousModel):
        dvalue = self.model.predict_real(X_pool)
    # NOTE(review): if the model is neither Probabilistic nor Continuous,
    # ``dvalue`` is unbound and the code below raises -- confirm callers
    # guarantee one of the two.
    if self.method == 'lc':
        # least confident: the lower the maximal decision value,
        # the higher the score
        score = -np.max(dvalue, axis=1)
    elif self.method == 'sm':
        # smallest margin between the two largest decision values
        if np.shape(dvalue)[1] > 2:
            # keep only the 2 largest decision values per row
            dvalue = -(np.partition(-dvalue, 2, axis=1)[:, :2])
        score = -np.abs(dvalue[:, 0] - dvalue[:, 1])
    elif self.method == 'entropy':
        score = np.sum(-dvalue * np.log(dvalue), axis=1)
    ask_id = np.argmax(score)
    if return_score:
        return unlabeled_entry_ids[ask_id], list(zip(unlabeled_entry_ids, score))
    else:
        return unlabeled_entry_ids[ask_id]
|
def firstVariant():
    """first variant of Variants

    Read-only
    """
    def fget(self):
        # fall back to a fresh Variant when the list is empty
        if not self.variants:
            return Variant()
        return self.variants[0]
    return locals()
|
def get_errvar_dataframe(self, singular_values=None):
    """Get a pandas dataframe of error variance results indexed on
    singular value and (prediction name, <errvar term>).

    Parameters
    ----------
    singular_values : list
        singular values to test.  Defaults to
        range(0, min(nnz_obs, nadj_par) + 1)

    Returns
    -------
    pandas.DataFrame
        multi-indexed pandas dataframe
    """
    if singular_values is None:
        singular_values = np.arange(0, min(self.pst.nnz_obs, self.pst.npar_adj) + 1)
    # accept a bare scalar as well as a list/array
    if not isinstance(singular_values, (list, np.ndarray)):
        singular_values = [singular_values]
    results = {}
    for sv in singular_values:
        for key, val in self.variance_at(sv).items():
            results.setdefault(key, []).append(val)
    return pd.DataFrame(results, index=singular_values)
|
def close_transport(self):
    """Forcibly close a previously acquired media transport.

    .. note:: The user should first make sure any transport event
       handlers are unregistered.
    """
    if not self.path:
        return
    self._release_media_transport(self.path, self.access_type)
    self.path = None
|
def init_app(self, app):
    '''Initializes the Flask application with this extension.

    Grabs the necessary configuration values from ``app.config``:
    HASHING_METHOD (defaults to ``sha256``, must be one of
    ``hashlib.algorithms``) and HASHING_ROUNDS (number of times to hash
    the input with the specified algorithm, defaults to 1).

    :param app: Flask application object
    '''
    self.algorithm = app.config.get('HASHING_METHOD', 'sha256')
    if self.algorithm not in algs:
        raise ValueError('{} not one of {}'.format(self.algorithm, algs))
    self.rounds = app.config.get('HASHING_ROUNDS', 1)
    if not isinstance(self.rounds, int):
        raise TypeError('HASHING_ROUNDS must be type int')
|
def checkout_commit(repo_path: str, commit: Any = None) -> None:
    """Checkout a specific commit.

    If ``commit`` is None (or otherwise falsy) checkout master.
    """
    target = commit or 'master'
    run_command(cmd='git checkout {}'.format(target), data=None,
                location=repo_path, chw=True)
|
def _item_to_metric(iterator, log_metric_pb):
    """Convert a metric protobuf to the native object.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that is currently in use.

    :type log_metric_pb: :class:`.logging_metrics_pb2.LogMetric`
    :param log_metric_pb: Metric protobuf returned from the API.

    :rtype: :class:`~google.cloud.logging.metric.Metric`
    :returns: The next metric in the page.
    """
    # LogMetric has no ``Any`` field, so ``MessageToDict`` is safe here.
    return Metric.from_api_repr(MessageToDict(log_metric_pb), iterator.client)
|
def discordian_calendar(season=None, year=None, dtobj=None):
    """Prints a discordian calendar for a particular season and year.

    Args::

        season: integer cardinal season from 1 to 5, the string "next",
                a relative offset such as "+1"/"-2", or a (partial)
                season name
        year: integer discordian year from 1166 to MAXYEAR + 1166,
              the string "next", or a relative offset such as "+1"
        dtobj: datetime object to instantiate the calendar from (Gregorian)
    """
    now = DDate(dtobj)
    moved_year = None
    if season is None:
        season = now.season
    elif season.lower() == "next":
        # NOTE(review): operator precedence makes this
        # ``now.season or (0 + 1)`` -- if ``(now.season or 0) + 1`` was
        # intended, "next" on a valid date does not advance the season;
        # confirm against _season_overflow's contract.
        season, moved_year = _season_overflow(now.season or 0 + 1, moved_year, now)
    else:
        # allow for +1, -2, ... for seasons
        for symbol, oper in zip(("+", "-"), (operator.add, operator.sub)):
            if symbol in season:
                try:
                    amount = int(season.strip(symbol))
                except ValueError:
                    raise ValueError("unknown season: {}".format(season))
                else:
                    season, moved_year = _season_overflow(
                        oper(now.season or 0, amount), moved_year, now,)
                break
        else:
            # allow the season name, or some starting part of it
            input_name = season.lower()
            for season_name in now.SEASONS:
                _name = season_name.lower()
                if input_name == _name or any([n.startswith(input_name) for n in _name.split(" ")]):
                    season = now.SEASONS.index(season_name)
                    break
            else:
                try:  # last try with a literal int being passed in
                    season = int(season)
                except ValueError:
                    raise ValueError("unknown season: {}".format(season))
                else:
                    if not 1 <= season <= 5:
                        raise ValueError("season must be in 1..5")
                    # allowing cardinal numbers from the user
                    season -= 1
    if year is None:
        year = moved_year or now.year
    elif year.lower() == "next":
        year = (moved_year or now.year) + 1
    else:
        # allow +N/-N relative years, mirroring the season handling
        for symbol, oper in zip(("+", "-"), (operator.add, operator.sub)):
            if symbol in year:
                year = oper(moved_year or now.year, int(year.strip(symbol)))
                break
        else:
            try:
                year = int(year)
            except ValueError:
                raise ValueError("invalid year: {}".format(year))
            # otherwise this error isn't that helpful
            if not MINYEAR <= year <= MAXYEAR:
                raise ValueError("year must be in {}..{}".format(MINYEAR, MAXYEAR))
    if now.day_of_season is None:
        # St. Tib's Day handling: no day-of-season on leap-year dates
        if is_leap_year(year - 1166):
            day_of_season = None
        else:
            day_of_season = 59
        season = season or 0
    else:
        day_of_season = now.day_of_season
    if day_of_season:
        cal_date = DDate(year=year, season=season, day_of_season=day_of_season)
        cal = MultiCalendar(discordian=True, date=cal_date)
        cal.print_calendar()
    else:
        print("{} in YOLD {}".format(now.holiday, year))
|
def _get_all_resourcescenarios(network_id, user_id):
    """Get all the resource scenarios in a network, across all scenarios.

    Returns a dictionary of lists of JSONObject, keyed on scenario_id.
    Hidden datasets are included only when created by or owned by
    ``user_id``.
    """
    # single joined query pulling dataset columns plus resource-scenario and
    # attribute ids; the outer join on DatasetOwner exposes hidden datasets
    # the user owns
    rs_qry = db.DBSession.query(
        Dataset.type,
        Dataset.unit_id,
        Dataset.name,
        Dataset.hash,
        Dataset.cr_date,
        Dataset.created_by,
        Dataset.hidden,
        Dataset.value,
        ResourceScenario.dataset_id,
        ResourceScenario.scenario_id,
        ResourceScenario.resource_attr_id,
        ResourceScenario.source,
        ResourceAttr.attr_id,
    ).outerjoin(
        DatasetOwner,
        and_(DatasetOwner.dataset_id == Dataset.id,
             DatasetOwner.user_id == user_id)
    ).filter(
        # dataset is visible: not hidden, created by the user, or owned by them
        or_(Dataset.hidden == 'N',
            Dataset.created_by == user_id,
            DatasetOwner.user_id != None),
        ResourceAttr.id == ResourceScenario.resource_attr_id,
        Scenario.id == ResourceScenario.scenario_id,
        Scenario.network_id == network_id,
        Dataset.id == ResourceScenario.dataset_id)
    x = time.time()
    logging.info("Getting all resource scenarios")
    all_rs = db.DBSession.execute(rs_qry.statement).fetchall()
    log.info("%s resource scenarios retrieved in %s", len(all_rs), time.time() - x)
    logging.info("resource scenarios retrieved. Processing results...")
    x = time.time()
    rs_dict = dict()
    for rs in all_rs:
        rs_obj = JSONObject(rs)
        rs_attr = JSONObject({'attr_id': rs.attr_id})
        value = rs.value
        # rebuild a dataset object from the flat row (metadata left empty)
        rs_dataset = JSONDataset({
            'id': rs.dataset_id,
            'type': rs.type,
            'unit_id': rs.unit_id,
            'name': rs.name,
            'hash': rs.hash,
            'cr_date': rs.cr_date,
            'created_by': rs.created_by,
            'hidden': rs.hidden,
            'value': value,
            'metadata': {},
        })
        rs_obj.resourceattr = rs_attr
        # both 'value' and 'dataset' reference the same dataset object
        rs_obj.value = rs_dataset
        rs_obj.dataset = rs_dataset
        scenario_rs = rs_dict.get(rs.scenario_id, [])
        scenario_rs.append(rs_obj)
        rs_dict[rs.scenario_id] = scenario_rs
    logging.info("resource scenarios processed in %s", time.time() - x)
    return rs_dict
|
def file_upload_notification(self, area_uuid, filename):
    """Notify Upload Service that a file has been placed in an Upload Area.

    :param str area_uuid: A RFC4122-compliant ID for the upload area
    :param str filename: The name of the file in the Upload Area
    :return: True
    :rtype: bool
    :raises UploadApiException: if file could not be stored
    """
    url_safe_filename = urlparse.quote(filename)
    # BUG FIX: the path previously contained the literal text "(unknown)"
    # where the quoted filename placeholder belongs, so the ``filename``
    # format argument was silently ignored.
    path = ("/area/{area_uuid}/{filename}".format(area_uuid=area_uuid,
                                                  filename=url_safe_filename))
    response = self._make_request('post', path=path)
    return response.ok
|
def deltas(self, start, height):
    """Yield (y, x) tuples for every cell that differs between the screen
    buffer and the double buffer, over ``height`` rows from ``start``."""
    last_row = min(start + height, self._height)
    for y in range(start, last_row):
        old_row = self._screen_buffer[y]
        new_row = self._double_buffer[y]
        for x in range(self._width):
            if old_row[x] != new_row[x]:
                yield y, x
|
def _structure_set(self, obj, cl):
    """Convert an iterable into a potentially generic set."""
    if is_bare(cl) or cl.__args__[0] is Any:
        # untyped set: no per-element conversion needed
        return set(obj)
    elem_type = cl.__args__[0]
    handler = self._structure_func.dispatch(elem_type)
    return {handler(element, elem_type) for element in obj}
|
def execute(self, connection_id, statement_id, signature, parameter_values=None, first_frame_max_size=None):
    """Returns a frame of rows for an executed statement.

    The frame describes whether there may be another frame. If there is not
    another frame, the current iteration is done when we have finished the
    rows in this frame.

    :param connection_id: ID of the current connection.
    :param statement_id: ID of the statement to fetch rows from.
    :param signature: common_pb2.Signature object.
    :param parameter_values: A list of parameter values, if statement is to
        be executed; otherwise ``None``.
    :param first_frame_max_size: The maximum number of rows that will be
        returned in the first Frame returned for this query.
    :returns: Frame data, or ``None`` if there are no more.
    """
    req = requests_pb2.ExecuteRequest()
    req.statementHandle.id = statement_id
    req.statementHandle.connection_id = connection_id
    req.statementHandle.signature.CopyFrom(signature)
    if parameter_values is not None:
        req.parameter_values.extend(parameter_values)
        req.has_parameter_values = True
    if first_frame_max_size is not None:
        # Populate both the deprecated and the current field so that both
        # older and newer Avatica servers honor the limit.
        req.deprecated_first_frame_max_size = first_frame_max_size
        req.first_frame_max_size = first_frame_max_size
    resp = responses_pb2.ExecuteResponse()
    resp.ParseFromString(self._apply(req))
    return resp.results
|
def drop_constant_column_levels(df):
    """drop the levels of a multi-level column dataframe which are constant

    operates in place"""
    cols = df.columns
    # A level with at most one distinct label carries no information.
    redundant = [idx for idx, level in enumerate(cols.levels) if len(level) <= 1]
    # Drop from the highest index down so earlier indices stay valid.
    for idx in sorted(redundant, reverse=True):
        cols = cols.droplevel(idx)
    df.columns = cols
|
async def proxy(self, port, proxied_path):
    '''This serverextension handles:
    {base_url}/proxy/{port([0-9]+)}/{proxied_path}
    {base_url}/proxy/absolute/{port([0-9]+)}/{proxied_path}
    {base_url}/{proxy_base}/{proxied_path}'''
    if 'Proxy-Connection' in self.request.headers:
        # Strip this hop-by-hop header before forwarding upstream.
        del self.request.headers['Proxy-Connection']
    self._record_activity()
    if self.request.headers.get("Upgrade", "").lower() == 'websocket':
        # We wanna websocket!
        # jupyterhub/jupyter-server-proxy@36b3214
        self.log.info("we wanna websocket, but we don't define WebSocketProxyHandler")
        self.set_status(500)
        # NOTE(review): no early return here, so after setting 500 the
        # handler still falls through to the plain-HTTP proxying below --
        # confirm this is intended.
    body = self.request.body
    if not body:
        # An explicitly empty body is kept only for POST; other methods
        # forward with no body at all.
        if self.request.method == 'POST':
            body = b''
        else:
            body = None
    client = httpclient.AsyncHTTPClient()
    req = self._build_proxy_request(port, proxied_path, body)
    # raise_error=False: handle upstream HTTP errors ourselves below.
    response = await client.fetch(req, raise_error=False)
    # record activity at start and end of requests
    self._record_activity()
    # For all non http errors...
    if response.error and type(response.error) is not httpclient.HTTPError:
        self.set_status(500)
        self.write(str(response.error))
    else:
        self.set_status(response.code, response.reason)
        # clear tornado default header
        self._headers = httputil.HTTPHeaders()
        for header, v in response.headers.get_all():
            # Hop-by-hop / transport headers must not be copied verbatim.
            if header not in ('Content-Length', 'Transfer-Encoding', 'Content-Encoding', 'Connection'):
                # some header appear multiple times, eg 'Set-Cookie'
                self.add_header(header, v)
        if response.body:
            self.write(response.body)
|
def set_primary_contact(self, email):
    """assigns the primary contact for this client"""
    payload = {"email": email}
    resp = self._put(self.uri_for('primarycontact'), params=payload)
    return json_to_py(resp)
|
def argument(self, *args, **kwargs):
    """Registers a click.argument which falls back to a configmanager Item
    if user hasn't provided a value in the command line.

    Item must be the last of ``args``.
    """
    # click arguments are mandatory by default, which makes a config
    # fallback meaningless -- demand an explicit required=False.
    if kwargs.get('required', True):
        raise TypeError(
            'In click framework, arguments are mandatory, unless marked required=False. '
            'Attempt to use configmanager as a fallback provider suggests that this is an optional option, '
            'not a mandatory argument.'
        )
    new_args, new_kwargs = _config_parameter(args, kwargs)
    return self._click.argument(*new_args, **new_kwargs)
|
async def write(self):
    """Code borrowed from StrictRedis so it can be fixed"""
    conn = self.connection
    cmds = self.commands
    # Results get clobbered by this write, so clear anything left over
    # from a previous run first.
    for cmd in cmds:
        cmd.result = None
    # Pack every command into a single request to improve network
    # performance; on connection/timeout failure, record the error as the
    # result of every command in the batch.
    try:
        await conn.send_packed_command(conn.pack_commands([cmd.args for cmd in cmds]))
    except (ConnectionError, TimeoutError) as exc:
        for cmd in cmds:
            cmd.result = exc
|
def show(self):
    """Create the Toplevel widget and its child widgets to show in the spot of the cursor.

    This is the callback for the delayed :obj:`<Enter>` event (see :meth:`~Balloon._on_enter`).
    """
    self._toplevel = tk.Toplevel(self.master)
    self._canvas = tk.Canvas(self._toplevel, background=self.__background)
    # Header label carries the balloon title plus its icon (if any).
    self.header_label = ttk.Label(self._canvas, text=self.__headertext, background=self.__background, image=self._photo_image, compound=tk.LEFT)
    self.text_label = ttk.Label(self._canvas, text=self.__text, wraplength=self.__width, background=self.__background)
    # Keep the balloon on top of all windows and strip WM decorations.
    self._toplevel.attributes("-topmost", True)
    self._toplevel.overrideredirect(True)
    self._grid_widgets()
    x, y = self.master.winfo_pointerxy()
    # Force geometry computation so winfo_width/height below are current.
    self._canvas.update()
    # Update the Geometry of the Toplevel to update its position and size;
    # the (+2, +2) offset keeps the balloon from sitting under the cursor.
    self._toplevel.geometry("{0}x{1}+{2}+{3}".format(self._canvas.winfo_width(), self._canvas.winfo_height(), x + 2, y + 2))
|
def update_voice_model(self, customization_id, name=None, description=None, words=None, **kwargs):
    """Update a custom model.

    Updates the metadata (name, description) and/or the word translations of
    the specified custom voice model. Adding a translation for a word that
    already exists in the model overwrites the word's existing translation.
    A custom model can contain no more than 20,000 entries. You must use
    credentials for the instance of the service that owns a model to update
    it. Translations may be sounds-like text or phonetic (IPA or IBM SPR)
    representations.

    **Note:** This method is currently a beta release.

    :param str customization_id: The customization ID (GUID) of the custom
        voice model.
    :param str name: A new name for the custom voice model.
    :param str description: A new description for the custom voice model.
    :param list[Word] words: Words and translations to add or update; pass
        an empty array to make no additions or updates.
    :param dict headers: A `dict` containing the request headers.
    :return: A `DetailedResponse` containing the result, headers and HTTP
        status code.
    :rtype: DetailedResponse
    """
    if customization_id is None:
        raise ValueError('customization_id must be provided')
    if words is not None:
        words = [self._convert_model(word, Word) for word in words]
    request_headers = {}
    if 'headers' in kwargs:
        request_headers.update(kwargs.get('headers'))
    request_headers.update(get_sdk_headers('text_to_speech', 'V1', 'update_voice_model'))
    payload = {'name': name, 'description': description, 'words': words}
    url = '/v1/customizations/{0}'.format(*self._encode_path_vars(customization_id))
    return self.request(method='POST', url=url, headers=request_headers, json=payload, accept_json=True)
|
def get_performance_data(self, project, **params):
    '''Gets a dictionary of PerformanceSeries objects.

    You can specify which signatures to get by passing signature to this
    function.
    '''
    raw = self._get_json(self.PERFORMANCE_DATA_ENDPOINT, project, **params)
    return {signature: PerformanceSeries(series) for signature, series in raw.items()}
|
def _unpack_object_array(inp, source, prescatter):
    """Unpack Array[Object] with a scatter for referencing in input calls.

    There is no shorthand syntax for referencing all items in an array, so
    we explicitly unpack them with a scatter.
    """
    # Disabled: superseded by the record/struct/object improvements. The
    # code below is unreachable and retained for reference only.
    raise NotImplementedError("Currently not used with record/struct/object improvements")
    base_rec, attr = source.rsplit(".", 1)
    new_name = "%s_%s_unpack" % (inp["name"], base_rec.replace(".", "_"))
    prescatter[base_rec].append((new_name, attr, _to_variable_type(inp["type"]["items"])))
    return new_name, prescatter
|
def __definitions_descriptor(self):
    """Describes the definitions section of the OpenAPI spec.

    Returns:
      Dictionary describing the definitions of the spec.
    """
    # Filter out any keys that aren't 'properties' or 'type'.
    # (Portability fix: .items()/.values() instead of the Python-2-only
    # .iteritems()/.itervalues() -- identical behavior on Python 2, and
    # the code now also runs on Python 3.)
    result = {}
    for def_key, def_value in self.__parser.schemas().items():
        if 'properties' in def_value or 'type' in def_value:
            key_result = {}
            required_keys = set()
            if 'type' in def_value:
                key_result['type'] = def_value['type']
            if 'properties' in def_value:
                for prop_key, prop_value in def_value['properties'].items():
                    # Hoist a per-property 'required' flag into the
                    # definition-level 'required' list (OpenAPI form).
                    if isinstance(prop_value, dict) and 'required' in prop_value:
                        required_keys.add(prop_key)
                        del prop_value['required']
                key_result['properties'] = def_value['properties']
            # Add in the required fields, if any
            if required_keys:
                key_result['required'] = sorted(required_keys)
            result[def_key] = key_result
    # Add 'type': 'object' to all object properties
    # Also, recursively add relative path to all $ref values
    for def_value in result.values():
        for prop_value in def_value.values():
            if isinstance(prop_value, dict):
                if '$ref' in prop_value:
                    prop_value['type'] = 'object'
                self._add_def_paths(prop_value)
    return result
|
def create(self, check, notification_plan, criteria=None, disabled=False, label=None, name=None, metadata=None):
    """Creates an alarm that binds the check on the given entity with a
    notification plan.

    The 'criteria' parameter, if supplied, should be a string representing
    the DSL for describing alerting conditions and their output states.
    Pyrax does not do any validation of these criteria statements; it is up
    to you as the developer to understand the language and correctly form
    the statement. This alarm language is documented online in the Cloud
    Monitoring section of http://docs.rackspace.com.
    """
    body = {
        "check_id": utils.get_id(check),
        "notification_plan_id": utils.get_id(notification_plan),
    }
    if criteria:
        body["criteria"] = criteria
    if disabled is not None:
        body["disabled"] = disabled
    # 'label' wins over the deprecated 'name' alias.
    if label or name:
        body["label"] = label or name
    if metadata:
        body["metadata"] = metadata
    resp, resp_body = self.api.method_post("/%s" % self.uri_base, body=body)
    # A 201 Created response carries the new alarm's ID in a header;
    # anything else falls through and returns None.
    if resp.status_code == 201:
        return self.get(resp.headers["x-object-id"])
|
def do_WhoHasRequest(self, apdu):
    """Respond to a Who-Has request."""
    if _debug:
        WhoHasIHaveServices._debug("do_WhoHasRequest, %r", apdu)
    # ignore this if there's no local device
    if not self.localDevice:
        if _debug:
            # NOTE(review): logs via WhoIsIAmServices rather than
            # WhoHasIHaveServices -- looks like a copy/paste leftover.
            WhoIsIAmServices._debug(" - no local device")
        return
    # if this has limits, check them like Who-Is
    if apdu.limits is not None:
        # extract the parameters
        low_limit = apdu.limits.deviceInstanceRangeLowLimit
        high_limit = apdu.limits.deviceInstanceRangeHighLimit
        # check for consistent parameters: both bounds present and within
        # the 22-bit BACnet device-instance range.
        if (low_limit is None):
            raise MissingRequiredParameter("deviceInstanceRangeLowLimit required")
        if (low_limit < 0) or (low_limit > 4194303):
            raise ParameterOutOfRange("deviceInstanceRangeLowLimit out of range")
        if (high_limit is None):
            raise MissingRequiredParameter("deviceInstanceRangeHighLimit required")
        if (high_limit < 0) or (high_limit > 4194303):
            raise ParameterOutOfRange("deviceInstanceRangeHighLimit out of range")
        # see we should respond: only when our device instance falls in
        # the [low_limit, high_limit] window.
        if (self.localDevice.objectIdentifier[1] < low_limit):
            return
        if (self.localDevice.objectIdentifier[1] > high_limit):
            return
    # find the object, by identifier or by name
    if apdu.object.objectIdentifier is not None:
        obj = self.objectIdentifier.get(apdu.object.objectIdentifier, None)
    elif apdu.object.objectName is not None:
        obj = self.objectName.get(apdu.object.objectName, None)
    else:
        raise InconsistentParameters("object identifier or object name required")
    # maybe we don't have it
    if not obj:
        return
    # send out the response
    self.i_have(obj, address=apdu.pduSource)
|
def set_promisc(self, value):
    """Set the interface in promiscuous mode"""
    packed = struct.pack('i', value)
    try:
        fcntl.ioctl(self.ins, BIOCPROMISC, packed)
    except IOError:
        raise Scapy_Exception("Cannot set promiscuous mode on interface (%s)!" % self.iface)
|
def interp_value(self, lat, lon, indexed=False):
    """Lookup a pixel value in the raster data, performing linear
    interpolation if necessary. indexed ==> nearest neighbor (*fast*)."""
    px, py = self.grid_coordinates.projection_to_raster_coords(lat, lon)
    if indexed:
        # Nearest neighbour: round to the closest integer pixel.
        return self.raster_data[round(py), round(px)]
    # Bilinear interpolation (order=1) at the fractional pixel position.
    from scipy.ndimage import map_coordinates
    return map_coordinates(self.raster_data, [[py], [px]], order=1)[0]
|
def project_geometry(geometry, crs=None, to_crs=None, to_latlong=False):
    """Project a shapely Polygon or MultiPolygon from lat-long to UTM, or
    vice-versa.

    Parameters
    ----------
    geometry : shapely Polygon or MultiPolygon
        the geometry to project
    crs : dict
        the starting coordinate reference system of the passed-in geometry;
        default value (None) will set settings.default_crs as the CRS
    to_crs : dict
        if not None, just project to this CRS instead of to UTM
    to_latlong : bool
        if True, project from crs to lat-long, if False, project from crs
        to local UTM zone

    Returns
    -------
    tuple
        (geometry_proj, crs), the projected shapely geometry and the crs of
        the projected geometry
    """
    # Wrap the bare geometry in a one-row GeoDataFrame so project_gdf can
    # do the work, then unwrap the projected result.
    gdf = gpd.GeoDataFrame()
    gdf.crs = settings.default_crs if crs is None else crs
    gdf.gdf_name = 'geometry to project'
    gdf['geometry'] = None
    gdf.loc[0, 'geometry'] = geometry
    projected = project_gdf(gdf, to_crs=to_crs, to_latlong=to_latlong)
    return projected['geometry'].iloc[0], projected.crs
|
def port_profile_vlan_profile_switchport_access_mac_vlan_classification_access_vlan_access_vlan_id ( self , ** kwargs ) :
"""Auto Generated Code"""
|
config = ET . Element ( "config" )
port_profile = ET . SubElement ( config , "port-profile" , xmlns = "urn:brocade.com:mgmt:brocade-port-profile" )
name_key = ET . SubElement ( port_profile , "name" )
name_key . text = kwargs . pop ( 'name' )
vlan_profile = ET . SubElement ( port_profile , "vlan-profile" )
switchport = ET . SubElement ( vlan_profile , "switchport" )
access_mac_vlan_classification = ET . SubElement ( switchport , "access-mac-vlan-classification" )
access = ET . SubElement ( access_mac_vlan_classification , "access" )
vlan = ET . SubElement ( access , "vlan" )
access_mac_address_key = ET . SubElement ( vlan , "access-mac-address" )
access_mac_address_key . text = kwargs . pop ( 'access_mac_address' )
access_vlan_id = ET . SubElement ( vlan , "access-vlan-id" )
access_vlan_id . text = kwargs . pop ( 'access_vlan_id' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config )
|
def _get_region(self, contig, start, end, fout, min_length=250):
    '''Writes reads mapping to given region of contig, trimming part of read not in the region'''
    sam_reader = pysam.Samfile(self.bam, "rb")
    # start == 0 means the region is at the left edge of the contig, so
    # reads are trimmed at their right-hand end; otherwise trim the left.
    trimming_end = (start == 0)
    for read in sam_reader.fetch(contig, start, end):
        # NOTE(review): read_interval is computed but never used.
        read_interval = pyfastaq.intervals.Interval(read.pos, read.reference_end - 1)
        seq = mapping.aligned_read_to_read(read, ignore_quality=not self.fastq_out, revcomp=False)
        if trimming_end:
            bases_off_start = 0
            # Number of reference bases the alignment extends past `end`.
            bases_off_end = max(0, read.reference_end - 1 - end)
            # seq.seq = seq.seq[:read.query_alignment_end - bases_off_end]
            seq = seq.subseq(0, read.query_alignment_end - bases_off_end)
        else:
            # Number of reference bases the alignment starts before `start`.
            bases_off_start = max(0, start - read.pos + 1)
            # seq.seq = seq.seq[bases_off_start + read.query_alignment_start:]
            seq = seq.subseq(bases_off_start + read.query_alignment_start, len(seq))
        if read.is_reverse:
            seq.revcomp()
        # Drop fragments that became too short after trimming.
        if len(seq) >= min_length:
            print(seq, file=fout)
|
def _isin(expr, values):
    """Return a boolean sequence or scalar showing whether each element is
    exactly contained in the passed `values`.

    :param expr: sequence or scalar
    :param values: `list` object or sequence
    :return: boolean sequence or scalar
    """
    from .merge import _make_different_sources

    # A sequence of candidate values must live in a distinct source from
    # the probed expression.
    if isinstance(values, SequenceExpr):
        expr, values = _make_different_sources(expr, values)
    if isinstance(expr, SequenceExpr):
        return IsIn(_input=expr, _values=values, _data_type=types.boolean)
    if isinstance(expr, Scalar):
        return IsIn(_input=expr, _values=values, _value_type=types.boolean)
|
def ValidateEmail(email, column_name=None, problems=None):
    """Check the basic validity of an email address.

    An empty email is considered valid and no error or warning is issued.
    Otherwise the address must consist of a non-'@' local part, a single
    '@', and a domain that contains a dot.
    """
    if IsEmpty(email) or re.match(r'[^@]+@[^@]+\.[^@]+', email):
        return True
    if problems:
        problems.InvalidValue(column_name, email)
    return False
|
def _safe_squeeze(arr, *args, **kwargs):
    """numpy.squeeze will reduce a 1-item array down to a zero-dimensional
    "array", which is not necessarily desirable.

    This function does the squeeze operation, but ensures that there is at
    least 1 dimension in the output.
    """
    squeezed = np.squeeze(arr, *args, **kwargs)
    # Promote a 0-d result back to a length-1 1-d array.
    return squeezed.reshape((1,)) if np.ndim(squeezed) == 0 else squeezed
|
def tag_name(cls, tag):
    """return the name of the tag, with the namespace removed"""
    # Unwrap lxml element objects down to their raw tag string.
    while isinstance(tag, etree._Element):
        tag = tag.tag
    # Namespaced tags look like '{uri}name'; keep the part after the '}'.
    return tag.rsplit('}', 1)[-1]
|
def parse_codons(ref, start, end, strand):
    """parse codon nucleotide positions in range start -> end, wrt strand"""
    # Slice the 1-based inclusive region, reverse-complementing for the
    # minus strand.
    region = ref[start - 1:end]
    if strand == -1:
        region = rc_stats(region)
    triplet = []
    for position in region:
        triplet.append(position)
        if len(triplet) == 3:
            yield triplet
            triplet = []
|
def _neighbour_to_path_call(neig_type, neighbour, element):
    """Get :class:`PathCall` from `neighbour` and `element`.

    Args:
        neig_type (str): `left` for left neighbour, `right` for right; used
            to determine :attr:`PathCall.call_type` of the returned object.
        neighbour (obj): Reference to `neighbour` object.
        element (obj): Reference to HTMLElement holding required data.

    Returns:
        obj: :class:`PathCall` instance with data necessary to find
        `element` by comparing its `neighbour`.
    """
    content = neighbour.getContent().strip()
    if neighbour.isTag():
        params = [neighbour.getTagName(), _params_or_none(neighbour.params), content]
    else:
        # Text-node neighbour: only the content is usable for matching.
        params = [None, None, content]
    neigh_call = NeighCall(element.getTagName(), _params_or_none(element.params), params)
    # TODO: Dynamic lookup (the index is hardcoded to 0)
    return PathCall(neig_type + "_neighbour_tag", 0, neigh_call)
|
def preston_bin(data, max_num):
    """Bins data on base 2 using Preston's method.

    Parameters
    ----------
    data : array-like
        Data to be binned
    max_num : float
        The maximum upper value of the data

    Returns
    -------
    tuple
        (binned_data, bin_edges)

    Notes
    -----
    Uses Preston's method of binning, which has exclusive lower boundaries
    and inclusive upper boundaries. Densities are not split between bins.

    Examples
    --------
    >>> import macroeco.compare as comp
    >>> import numpy as np
    >>> data = np.array([1, 1, 1, 1, 4, 5, 6, 7, 12, 34, 56])
    >>> comp.preston_bin(data, np.max(data))
    (array([4, 0, 1, 3, 1, 0, 2]),
     array([ 1.,  2.,  3.,  5.,  9., 17., 33., 65.]))

    References
    ----------
    Preston, F. (1962). The canonical distribution of commonness and
    rarity. Ecology, 43, 185-215
    """
    log_ub = np.ceil(np.log2(max_num))
    # Make an exclusive lower bound in keeping with Preston
    if log_ub == 0:
        edges = np.array([0, 1])
    elif log_ub == 1:
        edges = np.arange(1, 4)
    else:
        edges = 2 ** np.arange(0, log_ub + 1)
        # Split the 2-4 octave at 3, then shift upper edges by one so the
        # powers of two land on inclusive upper boundaries.
        edges = np.insert(edges, 2, 3)
        edges[3:] = edges[3:] + 1
    return np.histogram(data, bins=edges)
|
def execute(self, i, o):
    """Executes the command.

    :type i: cleo.inputs.input.Input
    :type o: cleo.outputs.output.Output
    """
    self._resolver = DatabaseManager(self._get_config(i))
|
def fit_tip_labels(self):
    """Modifies display range to ensure tip labels fit.

    This is a bit hackish still: the 'extents' range of the rendered text
    is totally correct, so we add a little buffer here. Users can still
    override the axes domains afterwards if needed. When edge lengths are
    not used, unit lengths give the tree height instead.
    """
    # Buffer added beyond the tree height so rendered tip text fits.
    if self.style.use_edge_lengths:
        pad = self.ttree.treenode.height * .85
    else:
        pad = self.ttree.treenode.get_farthest_leaf(True)[1]
    # Expand the display domain on the side the labels grow towards,
    # depending on tree orientation.
    if self.style.tip_labels:
        if self.style.orient == "right":
            self.axes.x.domain.max = pad
        elif self.style.orient == "down":
            self.axes.y.domain.min = -1 * pad
|
def string_sequence(n: int) -> str:
    """Return a string containing space-delimited numbers starting from 0 upto n inclusive.

    Parameters:
        n (int): The end of the sequence (inclusive).

    Returns:
        str: A string representing the sequence.

    Examples:
        >>> string_sequence(0)
        '0'
        >>> string_sequence(5)
        '0 1 2 3 4 5'
    """
    # Doc fix: the first doctest example was missing its expected output.
    return ' '.join(str(i) for i in range(n + 1))
|
def load_logos(filename):
    '''Load logos from a geologos archive from <filename>

    <filename> can be either a local path or a remote URL.
    '''
    # Remote bundles are first fetched into the temporary storage area.
    if filename.startswith('http'):
        log.info('Downloading GeoLogos bundle: %s', filename)
        filename, _ = urlretrieve(filename, tmp.path('geologos.tar.xz'))
    log.info('Extracting GeoLogos bundle')
    # NOTE(review): extractall on an untrusted archive is vulnerable to
    # path traversal -- confirm the bundle source is trusted.
    with contextlib.closing(lzma.LZMAFile(filename)) as xz_stream:
        with tarfile.open(fileobj=xz_stream) as archive:
            archive.extractall(tmp.root)
    log.info('Moving to the final location and cleaning up')
    if os.path.exists(logos.root):
        shutil.rmtree(logos.root)
    shutil.move(tmp.path('logos'), logos.root)
    log.info('Done')
|
def decline_strong_feminine_noun(ns: str, gs: str, np: str):
    """Gives the full declension of strong feminine nouns.

    o macron-stem

    Most of strong feminine nouns follows the declension of rún and för.

    >>> decline_strong_feminine_noun("rún", "rúnar", "rúnar")
    rún
    rún
    rún
    rúnar
    rúnar
    rúnar
    rúnum
    rúna

    >>> decline_strong_feminine_noun("för", "farar", "farar")
    för
    för
    för
    farar
    farar
    farar
    förum
    fara

    >>> decline_strong_feminine_noun("kerling", "kerlingar", "kerlingar")
    kerling
    kerling
    kerlingu
    kerlingar
    kerlingar
    kerlingar
    kerlingum
    kerlinga

    >>> decline_strong_feminine_noun("skel", "skeljar", "skeljar")
    skel
    skel
    skel
    skeljar
    skeljar
    skeljar
    skeljum
    skelja

    >>> decline_strong_feminine_noun("ör", "örvar", "örvar")
    ör
    ör
    ör
    örvar
    örvar
    örvar
    örum
    örva

    >>> decline_strong_feminine_noun("heiðr", "heiðar", "heiðar")
    heiðr
    heiði
    heiði
    heiðar
    heiðar
    heiðar
    heiðum
    heiða

    i-stem

    >>> decline_strong_feminine_noun("öxl", "axlar", "axlir")
    öxl
    öxl
    öxl
    axlar
    axlir
    axlir
    öxlum
    axla

    >>> decline_strong_feminine_noun("höfn", "hafnar", "hafnir")
    höfn
    höfn
    höfn
    hafnar
    hafnir
    hafnir
    höfnum
    hafna

    >>> decline_strong_feminine_noun("norn", "nornar", "nornir")
    norn
    norn
    norn
    nornar
    nornir
    nornir
    nornum
    norna

    >>> decline_strong_feminine_noun("jörð", "jarðar", "jarðir")
    jörð
    jörð
    jörð
    jarðar
    jarðir
    jarðir
    jörðum
    jarða

    >>> decline_strong_feminine_noun("borg", "borgar", "borgir")
    borg
    borg
    borgu
    borgar
    borgir
    borgir
    borgum
    borga

    :param ns: nominative singular
    :param gs: genitive singular
    :param np: nominative plural
    :return:
    """
    # The eight cases are printed in order: nom/acc/dat/gen singular,
    # then nom/acc/dat/gen plural.
    # nominative singular
    print(ns)
    # accusative singular
    # Nouns in consonant + final -r (e.g. "heiðr") replace the -r with -i.
    if len(ns) > 2 and ns[-1] == "r" and ns[-2] in CONSONANTS:
        print(ns[:-1] + "i")
    else:
        print(ns)
    # dative singular
    if len(ns) > 2 and ns[-1] == "r" and ns[-2] in CONSONANTS:
        print(ns[:-1] + "i")
    elif ns.endswith("ing") or ns.endswith("rg"):
        # e.g. "kerling" -> "kerlingu", "borg" -> "borgu"
        print(ns + "u")
    else:
        print(ns)
    # genitive singular
    print(gs)
    # nominative plural
    print(np)
    # accusative plural
    print(np)
    # dative plural
    # print("dative plural " + np[len(np[:-3]):][0])
    # The third-from-last character of the nominative plural selects the
    # stem class (v-stem / j-stem).
    # NOTE(review): the "j" branch and the final else produce the same
    # expression; only the "v" branch differs (it also drops a character).
    if np[len(np[:-3]):][0] == "v":
        print(apply_u_umlaut(np[:-2])[:-1] + "um")
    elif np[len(np[:-3]):][0] == "j":
        print(apply_u_umlaut(np[:-2]) + "um")
    else:
        print(apply_u_umlaut(np[:-2]) + "um")
    # genitive plural
    print(np[:-2] + "a")
|
async def get_connections(self, data=True):
    """Return connections for all the agents in the slave environments.

    This is a managing function for
    :meth:`~creamas.mp.MultiEnvironment.get_connections`.

    :param bool data: forwarded unchanged to the multi-environment call;
        presumably selects whether connection data is included -- confirm
        against MultiEnvironment.get_connections.
    """
    # as_coro=True makes the delegate return an awaitable, awaited here.
    return await self.menv.get_connections(data=data, as_coro=True)
|
def batch_face_locations(images, number_of_times_to_upsample=1, batch_size=128):
    """Returns an 2d array of bounding boxes of human faces in a image using the cnn face detector

    If you are using a GPU, this can give you much faster results since the
    GPU can process batches of images at once. If you aren't using a GPU,
    you don't need this function.

    :param images: A list of images (each as a numpy array)
    :param number_of_times_to_upsample: How many times to upsample the image looking for faces. Higher numbers find smaller faces.
    :param batch_size: How many images to include in each GPU processing batch.
    :return: A list of tuples of found face locations in css (top, right, bottom, left) order
    """
    raw_batches = _raw_face_locations_batched(images, number_of_times_to_upsample, batch_size)

    def _batch_to_css(detections):
        # Clamp each rectangle to the image bounds and convert to css order.
        bounds = images[0].shape
        return [_trim_css_to_bounds(_rect_to_css(det.rect), bounds) for det in detections]

    return [_batch_to_css(batch) for batch in raw_batches]
|
def match(self, pattern, screen=None, rect=None, offset=None, threshold=None, method=None):
    """Check if image position in screen

    Args:
        - pattern: Image file name or opencv image object
        - screen (PIL.Image): optional, if not None, screenshot method will be called
        - rect (tuple): optional 4-tuple; search only inside this region
        - offset: optional (x, y) percentages applied to the match position
        - threshold (float): it depends on the image match method
        - method (string): choices on <template|sift>

    Returns:
        None or FindPoint, For example:

        FindPoint(pos=(20, 30), method='tmpl', confidence=0.801, matched=True)

        Only when confidence > self.image_match_threshold, matched will be True

    Raises:
        TypeError: when image_match_method is invalid
    """
    pattern = self.pattern_open(pattern)
    search_img = pattern.image
    # Scale the template to match the device/screen scale factor.
    pattern_scale = self._cal_scale(pattern)
    if pattern_scale != 1.0:
        search_img = cv2.resize(search_img, (0, 0), fx=pattern_scale, fy=pattern_scale, interpolation=cv2.INTER_CUBIC)
    screen = screen or self.region_screenshot()
    threshold = threshold or pattern.threshold or self.image_match_threshold
    # handle offset if percent, ex (0.2, 0.8)
    dx, dy = offset or pattern.offset or (0, 0)
    dx = pattern.image.shape[1] * dx
    # opencv object width
    dy = pattern.image.shape[0] * dy
    # opencv object height
    dx, dy = int(dx * pattern_scale), int(dy * pattern_scale)
    # image match
    screen = imutils.from_pillow(screen)
    # convert to opencv image
    if rect and isinstance(rect, tuple) and len(rect) == 4:
        # Crop to the requested region; the offsets must be shifted by the
        # crop origin so the returned position stays in full-screen coords.
        (x0, y0, x1, y1) = [int(v * pattern_scale) for v in rect]
        (dx, dy) = dx + x0, dy + y0
        screen = imutils.crop(screen, x0, y0, x1, y1)
        # cv2.imwrite('cc.png', screen)
    match_method = method or self.image_match_method
    ret = None
    confidence = None
    matched = False
    position = None
    if match_method == consts.IMAGE_MATCH_METHOD_TMPL:  # IMG_METHOD_TMPL
        ret = ac.find_template(screen, search_img)
        if ret is None:
            return None
        confidence = ret['confidence']
        if confidence > threshold:
            matched = True
        (x, y) = ret['result']
        position = (x + dx, y + dy)
        # fix by offset
    elif match_method == consts.IMAGE_MATCH_METHOD_SIFT:
        ret = ac.find_sift(screen, search_img, min_match_count=10)
        if ret is None:
            return None
        confidence = ret['confidence']
        # For SIFT, confidence is a (matches, total) pair rather than a float.
        matches, total = confidence
        if 1.0 * matches / total > 0.5:  # FIXME(ssx): sift just write here
            matched = True
        (x, y) = ret['result']
        position = (x + dx, y + dy)
        # fix by offset
    elif match_method == consts.IMAGE_MATCH_METHOD_AUTO:
        fp = self._match_auto(screen, search_img, threshold)
        if fp is None:
            return None
        (x, y) = fp.pos
        position = (x + dx, y + dy)
        # NOTE(review): positional argument order here (pos, confidence,
        # method, matched) differs from the final return below (pos,
        # confidence, method) with matched as keyword -- but the docstring
        # example shows FindPoint(pos, method, confidence, matched). One of
        # the two constructions likely swaps confidence/method; confirm
        # against the FindPoint definition.
        # NOTE(review): this branch also returns before the self.bounds
        # offset below is applied, unlike the other branches -- confirm.
        return FindPoint(position, fp.confidence, fp.method, fp.matched)
    else:
        raise TypeError("Invalid image match method: %s" % (match_method,))
    (x, y) = ret['result']
    position = (x + dx, y + dy)
    # fix by offset
    if self.bounds:
        # Translate from region-relative to absolute screen coordinates.
        x, y = position
        position = (x + self.bounds.left, y + self.bounds.top)
    return FindPoint(position, confidence, match_method, matched=matched)
|
def sortJobs(jobTypes, options):
    """Return jobTypes sorted according to the options' sort settings.

    ``options.sortField`` selects the statistic (med/ave/min/total/max) and
    ``options.sortCategory`` selects what to sort on; ``options.sortReverse``
    reverses the order. Returns None for an unknown category (matching the
    previous implicit behavior).
    """
    longforms = {"med": "median", "ave": "average", "min": "min",
                 "total": "total", "max": "max"}
    sort_field = longforms[options.sortField]
    category = options.sortCategory

    if category in ("time", "clock", "wait", "memory"):
        key_func = lambda tag: getattr(tag, "%s_%s" % (sort_field, category))
    elif category == "alpha":
        key_func = lambda tag: tag.name
    elif category == "count":
        key_func = lambda tag: tag.total_number
    else:
        return None
    return sorted(jobTypes, key=key_func, reverse=options.sortReverse)
|
def append_underlying_workflow_describe(globalworkflow_desc):
    """Add a "workflowDescribe" field to each region of a global workflow.

    For every region in the description's "regionalOptions", the value is the
    describe output of the underlying workflow in that region. Anything that
    is not a global workflow description is returned unchanged.
    """
    is_global = (globalworkflow_desc
                 and globalworkflow_desc['class'] == 'globalworkflow'
                 and 'regionalOptions' in globalworkflow_desc)
    if not is_global:
        return globalworkflow_desc
    regional = globalworkflow_desc['regionalOptions']
    for region, config in regional.items():
        regional[region]['workflowDescribe'] = dxpy.api.workflow_describe(config['workflow'])
    return globalworkflow_desc
|
def compile_regex_from_str(self, ft_str):
    """Compile a regex from a string of bracketed feature masks.

    Args:
        ft_str (str): feature masks, each enclosed in square brackets, in
            which the features are delimited by any standard delimiter.

    Returns:
        Pattern: regular expression pattern equivalent to `ft_str`; each
        bracketed mask becomes a parenthesized alternation of the segments
        matching that mask.
    """
    alternations = []
    for match in re.finditer(r'\[([^]]+)\]', ft_str):
        mask = fts(match.group(1))
        matching_segs = self.all_segs_matching_fts(mask)
        alternations.append('({})'.format('|'.join(matching_segs)))
    return re.compile(''.join(alternations))
|
def _get_db():
    """Dump the remote database, download the gzipped dump, and return the
    local path of the downloaded file."""
    with cd(env.remote_path):
        # Random url-safe temp filename on the server ('=' padding stripped
        # so the name is shell-safe).
        file_path = '/tmp/' + _sql_paths('remote', str(base64.urlsafe_b64encode(uuid.uuid4().bytes)).replace('=', ''))
        run(env.python + ' manage.py dump_database | gzip > ' + file_path)
        # Local backup name is timestamped instead of random.
        local_file_path = './backups/' + _sql_paths('remote', datetime.now())
        get(file_path, local_file_path)
        # Clean up the server-side temp dump.
        run('rm ' + file_path)
    return local_file_path
|
def vcirc(self, R, phi=None):
    """
    NAME:
       vcirc
    PURPOSE:
       calculate the circular velocity at R in this potential
    INPUT:
       R - Galactocentric radius (can be Quantity)
       phi= (None) azimuth to use for non-axisymmetric potentials
    OUTPUT:
       circular rotation velocity
    HISTORY:
       2011-10-09 - Written - Bovy (IAS)
       2016-06-15 - Added phi= keyword for non-axisymmetric potential - Bovy (UofT)
    """
    # vc^2 = -R * F_R: centripetal balance against the radial force.
    radial_force = self.Rforce(R, phi=phi, use_physical=False)
    return nu.sqrt(-R * radial_force)
|
def workflow(ctx, client):
    """List or manage workflows with subcommands.

    With no subcommand, print each ``*.cwl`` file in the client's workflow
    directory together with the link-reference names that point at it.
    """
    if ctx.invoked_subcommand is None:
        from renku.models.refs import LinkReference
        # Map each referenced workflow name -> list of link names pointing at it.
        names = defaultdict(list)
        for ref in LinkReference.iter_items(client, common_path='workflows'):
            names[ref.reference.name].append(ref.name)
        # Print "<file>: <green link names>" for every CWL workflow file.
        for path in client.workflow_path.glob('*.cwl'):
            click.echo('{path}: {names}'.format(path=path.name, names=', '.join(click.style(_deref(name), fg='green') for name in names[path.name]), ))
|
def write_cookies_to_cache(cj, username):
    """Save a RequestsCookieJar to disk in Mozilla's cookies.txt file format.

    This prevents us from repeated authentications on the
    accounts.coursera.org and class.coursera.org/class_name sites.
    """
    mkdir_p(PATH_COOKIES, 0o700)
    cache_path = get_cookies_cache_path(username)
    mozilla_jar = cookielib.MozillaCookieJar()
    for cookie in cj:
        mozilla_jar.set_cookie(cookie)
    mozilla_jar.save(cache_path)
|
def _init_pval_name ( self , ** kws ) :
"""Initialize pvalue attribute name ."""
|
if 'pval_name' in kws :
return kws [ 'pval_name' ]
# If go2res contains GO Terms
if self . is_goterm :
return "p_{M}" . format ( M = next ( iter ( self . go2res . values ( ) ) ) . get_method_name ( ) )
# If go2res contains GO namedtuples
for fld in next ( iter ( self . go2res . values ( ) ) ) . _fields :
if fld [ : 2 ] == 'p_' and fld != 'p_uncorrected' :
return fld
|
def sync_table(model):
    """Inspect the model and create/update the corresponding table and columns.

    Note that attributes removed from the model are NOT deleted in the
    database; they simply stop showing up on the model.
    """
    if not issubclass(model, Model):
        raise CQLEngineException("Models must be derived from base Model.")
    if model.__abstract__:
        raise CQLEngineException("cannot create table from abstract model")
    # construct query string
    cf_name = model.column_family_name()
    raw_cf_name = model.column_family_name(include_keyspace=False)
    ks_name = model._get_keyspace()
    cluster = get_cluster()
    keyspace = cluster.metadata.keyspaces[ks_name]
    tables = keyspace.tables
    # check for an existing column family
    if raw_cf_name not in tables:
        qs = get_create_table(model)
        try:
            execute(qs)
        except CQLEngineException as ex:
            # 1.2 doesn't return cf names, so we have to examine the exception
            # and ignore if it says the column family already exists
            if "Cannot add already existing column family" not in unicode(ex):
                raise
    else:
        # see if we're missing any columns
        fields = get_fields(model)
        field_names = [x.name for x in fields]
        for name, col in model._columns.items():
            if col.primary_key or col.partition_key:
                continue  # we can't mess with the PK
            if col.db_field_name in field_names:
                continue  # skip columns already defined
            # add missing column using the column def
            query = "ALTER TABLE {} add {}".format(cf_name, col.get_column_def())
            logger.debug(query)
            execute(query)
        update_compaction(model)
    # Re-read table metadata and create any missing secondary indexes.
    table = cluster.metadata.keyspaces[ks_name].tables[raw_cf_name]
    indexes = [c for n, c in model._columns.items() if c.index]
    for column in indexes:
        if table.columns[column.db_field_name].index:
            continue  # index already exists in the schema
        qs = ['CREATE INDEX index_{}_{}'.format(raw_cf_name, column.db_field_name)]
        qs += ['ON {}'.format(cf_name)]
        qs += ['("{}")'.format(column.db_field_name)]
        qs = ' '.join(qs)
        execute(qs)
|
def write_virtual_memory(self, cpu_id, address, size, bytes_p):
    """Writes guest virtual memory; access handles (MMIO++) are ignored.

    This feature is not implemented in the 4.0.0 release but may show up
    in a dot release.

    in cpu_id of type int
        The identifier of the Virtual CPU.
    in address of type int
        The guest virtual address.
    in size of type int
        The number of bytes to write.
    in bytes_p of type str
        The bytes to write.
    """
    if not isinstance(cpu_id, baseinteger):
        raise TypeError("cpu_id can only be an instance of type baseinteger")
    if not isinstance(address, baseinteger):
        raise TypeError("address can only be an instance of type baseinteger")
    if not isinstance(size, baseinteger):
        raise TypeError("size can only be an instance of type baseinteger")
    if not isinstance(bytes_p, list):
        raise TypeError("bytes_p can only be an instance of type list")
    # NOTE(review): only the first 10 elements are spot-checked for type,
    # presumably to keep validation cheap on large buffers -- confirm intended.
    for a in bytes_p[:10]:
        if not isinstance(a, basestring):
            raise TypeError("array can only contain objects of type basestring")
    self._call("writeVirtualMemory", in_p=[cpu_id, address, size, bytes_p])
|
def ave_laplacian(self):
    '''Another kind of laplacian normalization, used in the matlab PVF code.

    Uses the formula: L = I - D^{-1} * W, then symmetrizes the result.
    Assumes self.matrix('dense') returns a square array -- TODO confirm.
    '''
    W = self.matrix('dense')
    # calculate -inv(D): negative reciprocal of each vertex degree;
    # zero-degree entries stay 0 to avoid division by zero.
    Dinv = W.sum(axis=0)
    mask = Dinv != 0
    Dinv[mask] = -1. / Dinv[mask]
    # calculate -inv(D) * W
    lap = (Dinv * W.T).T
    # add I (increment the main diagonal in place via the flat view)
    lap.flat[::W.shape[0] + 1] += 1
    # symmetrize
    return (lap + lap.T) / 2.0
|
def is_mount(self, name):
    """Return True if ``name`` is a mount name.

    A mount name starts with the mount prefix; when the mount prefix does
    not itself live under the module prefix, the name must additionally not
    be a module name.
    """
    under_mount = name.startswith(self.mount_prefix)
    if self.mount_prefix.startswith(self.module_prefix):
        return under_mount
    return under_mount and not name.startswith(self.module_prefix)
|
def sorted_names(names):
    """Sort a list of names, keeping the word 'default' first if present.

    Only a single occurrence of 'default' is pulled to the front; any
    duplicates sort normally with the rest.
    """
    remaining = list(names)
    has_default = 'default' in remaining
    if has_default:
        remaining.remove('default')  # drops exactly one occurrence
    ordered = sorted(remaining)
    return ['default'] + ordered if has_default else ordered
|
def rotate_around(self, axis, theta):
    """Return this vector rotated about ``axis`` through angle ``theta``.

    Right hand rule applies. Adapted from equations published by
    Glenn Murray:
    http://inside.mines.edu/~gmurray/ArbitraryAxisRotation/ArbitraryAxisRotation.html
    """
    px, py, pz = self.x, self.y, self.z
    ax, ay, az = axis.x, axis.y, axis.z
    # Common factors, extracted for simplicity and efficiency.
    norm_sq = ax ** 2 + ay ** 2 + az ** 2
    norm = math.sqrt(norm_sq)
    cos_t = math.cos(theta)
    sin_t = math.sin(theta) / norm
    dot_t = (ax * px + ay * py + az * pz) * (1 - cos_t) / norm_sq
    return Vector3(
        (ax * dot_t + px * cos_t + (ay * pz - az * py) * sin_t),
        (ay * dot_t + py * cos_t + (az * px - ax * pz) * sin_t),
        (az * dot_t + pz * cos_t + (ax * py - ay * px) * sin_t),
    )
|
def begin_x(self):
    """Return the X-position of the begin point of this connector, in
    English Metric Units (as a |Length| object)."""
    shape = self._element
    x, cx, flip = shape.x, shape.cx, shape.flipH
    # A horizontal flip puts the begin point at the far (x + cx) edge.
    return Emu(x + cx if flip else x)
|
def district_events(self, district, simple=False, keys=False):
    """Return list of events in a given district.

    :param district: Key of district whose events you want.
    :param simple: Get only vital data.
    :param keys: Return list of event keys only rather than full data on
        every event.
    :return: List of string keys or Event objects.
    """
    if keys:
        return self._get('district/%s/events/keys' % district)
    suffix = '/simple' if simple else ''
    raw_events = self._get('district/%s/events%s' % (district, suffix))
    return [Event(raw) for raw in raw_events]
|
def get_default_query_from_module(module):
    """Given a %%sql module, return its default (last) query.

    Args:
        module: the %%sql module.

    Returns:
        The default query associated with this module, or None when the
        argument is not a module (or has no stored query).
    """
    if not isinstance(module, types.ModuleType):
        return None
    return module.__dict__.get(_SQL_MODULE_LAST, None)
|
def upload_hub(hub, host, remote_dir, user=None, port=22, rsync_options=RSYNC_OPTIONS, staging=None):
    """Renders, stages, and uploads a hub.

    Renders the hub's files, stages them under ``staging`` (a fresh temp
    directory when None), rsyncs the staged tree to ``host:remote_dir``,
    and returns the link names produced by staging.

    NOTE(review): ``port`` is accepted but never forwarded to ``upload`` --
    confirm whether non-default SSH ports are honored.
    """
    hub.render()
    if staging is None:
        staging = tempfile.mkdtemp()
    staging, linknames = stage_hub(hub, staging=staging)
    local_dir = os.path.join(staging)
    upload(host, user, local_dir=local_dir, remote_dir=remote_dir, rsync_options=rsync_options)
    return linknames
|
def _graph_successors ( self , graph , node ) :
"""Return the successors of a node in the graph .
This method can be overriden in case there are special requirements with the graph and the successors . For
example , when we are dealing with a control flow graph , we may not want to get the FakeRet successors .
: param graph : The graph .
: param node : The node of which we want to get the successors .
: return : An iterator of successors .
: rtype : iter"""
|
if self . _graph_successors_func is not None :
return self . _graph_successors_func ( graph , node )
return graph . successors ( node )
|
def contains_container(self, path):
    """Return True if a container exists at the specified path, else False.

    :param path: str or Path instance
    :rtype: bool
    :raises ValueError: A component of path is a field name.
    """
    container_path = make_path(path)
    try:
        self.get_container(container_path)
    except KeyError:
        return False
    return True
|
def apply_u_umlaut(stem: str):
    """Change the vowel of the last syllable of ``stem`` if it is affected
    by a u-umlaut; for multi-syllable stems the penultimate syllable is
    also umlauted.

    >>> apply_u_umlaut("far")
    'för'
    >>> apply_u_umlaut("ör")
    'ör'
    >>> apply_u_umlaut("axl")
    'öxl'
    >>> apply_u_umlaut("hafn")
    'höfn'

    :param stem: non-empty stem to transform
    :return: the transformed stem
    """
    assert len(stem) > 0
    syllables = s.syllabify_ssp(stem.lower())
    if len(syllables) == 1:
        only_syllable = OldNorseSyllable(syllables[-1], VOWELS, CONSONANTS)
        only_syllable.apply_u_umlaut()
        return "".join(syllables[:-1]) + str(only_syllable)
    penultimate = OldNorseSyllable(syllables[-2], VOWELS, CONSONANTS)
    last = OldNorseSyllable(syllables[-1], VOWELS, CONSONANTS)
    penultimate.apply_u_umlaut()
    # BUG FIX: apply_u_umlaut(True) was previously invoked twice on the last
    # syllable (a duplicated line); apply it exactly once.
    last.apply_u_umlaut(True)
    return "".join(syllables[:-2]) + str(penultimate) + str(last)
|
def console_size(fd=1):
    """Return the console size of *fd* as a (LINES, COLUMNS) tuple of ints.

    Tries the TIOCGWINSZ ioctl first; falls back to the LINES/COLUMNS
    environment variables (defaults 25x80) when the ioctl modules are
    unavailable or *fd* is not a terminal.

    Fixes: previously an OSError from ioctl (fd not a tty, e.g. redirected
    output) propagated uncaught, and the env fallback returned strings
    while the ioctl path returned ints.
    """
    try:
        import fcntl
        import termios
        import struct
        size = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, b'1234'))
    except (ImportError, OSError):
        size = (int(os.getenv('LINES', 25)), int(os.getenv('COLUMNS', 80)))
    return size
|
def _conv_name ( x ) :
'''If this XML tree has an xmlns attribute , then etree will add it
to the beginning of the tag , like : " { http : / / path } tag " .'''
|
if '}' in x :
comps = x . split ( '}' )
name = comps [ 1 ]
return name
return x
|
def read(cls, filename, offset=0):
    """Read an ID3v2 tag from a file, starting at byte *offset*.

    Decoded frames are grouped by frame id in ``tag._frames``; empty
    frames are skipped with an EmptyFrameWarning. Returns the parsed tag.
    """
    i = 0  # running frame index, passed to the frame decoder
    with fileutil.opened(filename, "rb") as file:
        file.seek(offset)
        tag = cls()
        tag._read_header(file)
        for (frameid, bflags, data) in tag._read_frames(file):
            if len(data) == 0:
                warn("{0}: Ignoring empty frame".format(frameid), EmptyFrameWarning)
            else:
                frame = tag._decode_frame(frameid, bflags, data, i)
                if frame is not None:
                    l = tag._frames.setdefault(frame.frameid, [])
                    l.append(frame)
            # Stop once we have read past the declared tag size.
            if file.tell() > tag.offset + tag.size:
                break
            i += 1
        try:
            tag._filename = file.name
        except AttributeError:
            # File-like objects without a .name (e.g. BytesIO) are fine.
            pass
        return tag
|
def _minute_exclusion_tree(self):
    """Build an interval tree keyed by the start and end of each range
    of positions that should be dropped from windows. (These are the
    minutes between an early close and the minute which would be the close
    based on the regular period if there were no early close.)

    The value of each node is the same start and end position stored as a
    tuple. The data is stored this way to support a fast answer to the
    question: does a given start and end position overlap any of the
    exclusion spans?

    Returns
    -------
    IntervalTree
        Nodes which represent the minutes to exclude because of early
        closes.
    """
    itree = IntervalTree()
    for market_open, early_close in self._minutes_to_exclude():
        # Exclusions begin just after the early close...
        start_pos = self._find_position_of_minute(early_close) + 1
        # ...and run through what would have been the regular close.
        end_pos = (self._find_position_of_minute(market_open) + self._minutes_per_day - 1)
        data = (start_pos, end_pos)
        # Interval ends are exclusive; +1 makes end_pos itself covered.
        itree[start_pos:end_pos + 1] = data
    return itree
|
def value_to_db(self, value):
    """Return this field's single value prepared for saving into a
    database: the integer's decimal string form, UTF-8 encoded."""
    assert isinstance(value, six.integer_types)
    return "{0}".format(value).encode("utf_8")
|
def use_isolated_book_view(self):
    """Pass through to provider CommentLookupSession.use_isolated_book_view"""
    self._book_view = ISOLATED
    # self._get_provider_session('comment_lookup_session')  # To make sure the session is tracked
    for session in self._get_provider_sessions():
        try:
            session.use_isolated_book_view()
        except AttributeError:
            # Not every provider session supports book views; skip those.
            pass
|
def _simple_lock ( f ) :
"""Simple file lock , times out after 20 second assuming lock is stale"""
|
lock_file = f + ".lock"
timeout = 20
curtime = 0
interval = 2
while os . path . exists ( lock_file ) :
time . sleep ( interval )
curtime += interval
if curtime > timeout :
os . remove ( lock_file )
with open ( lock_file , "w" ) as out_handle :
out_handle . write ( "locked" )
yield
if os . path . exists ( lock_file ) :
os . remove ( lock_file )
|
def save_workflow_graph_for(self, spec_name, fname, full=False, style='flat', **kwargs):
    """Save a graph of the workflow that generates the requested spec_name.

    Parameters
    ----------
    spec_name : str
        Name of the spec to generate the graph for
    fname : str
        The filename for the saved graph (a '.png' suffix is appended if
        missing)
    full : bool
        If True, build a fresh workflow in the processor's work dir and
        connect the pipeline into it; otherwise graph the pipeline's
        existing workflow.
    style : str
        The style of the graph; can be one of 'orig', 'flat', 'exec',
        'hierarchical'
    """
    pipeline = self.spec(spec_name).pipeline
    if full:
        workflow = pe.Workflow(name='{}_gen'.format(spec_name), base_dir=self.processor.work_dir)
        self.processor._connect_pipeline(pipeline, workflow, **kwargs)
    else:
        workflow = pipeline._workflow
    fname = op.expanduser(fname)
    if not fname.endswith('.png'):
        fname += '.png'
    # write_graph renders from a .dot file with the same stem.
    dotfilename = fname[:-4] + '.dot'
    workflow.write_graph(graph2use=style, dotfilename=dotfilename)
|
def register_event(self, name, callback, validator):
    """Register a callback to receive events.

    Every event with the matching name will have its payload validated
    using validator and then will be passed to callback if validation
    succeeds.

    Callback must be a normal callback function, coroutines are not
    allowed. If you need to run a coroutine you are free to schedule it
    from your callback.

    Args:
        name (str): The name of the event that we are listening for
        callback (callable): The function that should be called when a
            message that matches validator is received. If it returns an
            awaitable, that awaitable is awaited.
        validator (Verifier): A schema verifier that will validate a
            received message uniquely
    """
    async def _validate_and_call(message):
        payload = message.get('payload')
        try:
            payload = validator.verify(payload)
        except ValidationError:
            # Invalid payloads are logged and dropped, never delivered.
            self._logger.warning("Dropping invalid payload for event %s, payload=%s", name, payload)
            return
        try:
            result = callback(payload)
            if inspect.isawaitable(result):
                await result
        except:  # pylint:disable=bare-except; This is a background logging routine
            self._logger.error("Error calling callback for event %s, payload=%s", name, payload, exc_info=True)
    self._manager.every_match(_validate_and_call, type="event", name=name)
|
def _get_authz_session(self):
    """Get the AuthorizationSession for the default (bootstrap) typed Vault.

    Assumes only one vault of this Type, but it can have children depending
    on the underlying impl. Falls back to a plain authorization session
    when vault lookup is unimplemented or no bootstrap vault exists.
    """
    from ..utilities import BOOTSTRAP_VAULT_TYPE
    try:
        vaults = self._get_vault_lookup_session().get_vaults_by_genus_type(BOOTSTRAP_VAULT_TYPE)
    except Unimplemented:
        return self._get_authz_manager().get_authorization_session()
    if vaults.available():
        vault = vaults.next()
        return self._get_authz_manager().get_authorization_session_for_vault(vault.get_id())
    else:
        return self._get_authz_manager().get_authorization_session()
|
def dispose(self):
    """Dispose of this events writer manager, making it no longer usable.

    Closes the underlying events writer and drops the reference. Call this
    method when the object is done being used; it should only ever be
    called once.
    """
    # Hold the lock via a context manager so it is released even if
    # Close() raises (the previous acquire()/release() pair leaked the
    # lock on error).
    with self._lock:
        self._events_writer.Close()
        self._events_writer = None
|
def import_path(path):
    """Import any valid python module or attribute path as though it were a
    module.

    :Example:
        >>> from yamlconf import import_path
        >>> from my_package.my_module.my_submodule import attribute
        >>> attribute.sub_attribute == import_path("my_package.my_module.my_submodule.attribute.sub_attribute")
        True

    :Parameters:
        path : `str`
            A valid python path that crosses modules and/or attributes
    """
    # Make the working directory importable, but only once -- the previous
    # unconditional insert grew sys.path by one entry per call.
    if "." not in sys.path:
        sys.path.insert(0, ".")
    parts = path.split(".")
    module = None
    # Import the module as deeply as possible. Prioritize an attribute
    # chain over a module chain.
    for i in range(1, len(parts) + 1):
        if module is not None and hasattr(module, parts[i - 1]):
            try:
                return _import_attributes(module, parts[i - 1:])
            except AttributeError:
                pass
        module_path = ".".join(parts[0:i])
        module = importlib.import_module(module_path)
    return module
|
def _write_session ( self ) :
"""Write SDK session file
Args :
version ( str ) : the version of the server"""
|
base_name = "%ssession" % self . _product_accronym . lower ( )
filename = "%s%s.py" % ( self . _class_prefix . lower ( ) , base_name )
override_content = self . _extract_override_content ( base_name )
self . write ( destination = self . output_directory , filename = filename , template_name = "session.py.tpl" , version = self . api_version , product_accronym = self . _product_accronym , class_prefix = self . _class_prefix , root_api = self . api_root , api_prefix = self . api_prefix , override_content = override_content , header = self . header_content )
|
def read(self, count=None, block=None, last_id=None):
    """Monitor the stream for new data.

    :param int count: limit number of messages returned
    :param int block: milliseconds to block, 0 for indefinitely
    :param last_id: Last id read (an exclusive lower-bound). If the '$'
        value is given, we will only read values added *after* our command
        started blocking.
    :returns: a list of (message id, data) 2-tuples.
    """
    start_id = '0-0' if last_id is None else last_id
    resp = self.database.xread({self.key: _decode(start_id)}, count, block)
    # resp maps stream name -> message list; only one stream was requested.
    if not resp:
        return []
    return resp[0][1]
|
def set_col_first(df, col_names):
    """Move the selected columns to the front of a pandas.DataFrame.

    Columns are moved one at a time, so the *last* name in ``col_names``
    ends up first (processed last).

    Args:
        df (pandas.DataFrame): frame whose columns should be reordered.
        col_names (list of str): column names to move to the front.

    Returns:
        pandas.DataFrame: ``df`` reindexed with the new column order.

    Raises:
        ValueError: if a name in ``col_names`` is not a column of ``df``.
    """
    column_headings = list(df.columns)
    for col_name in col_names:
        # .remove raises ValueError for unknown names, matching the old
        # .index-based lookup (which also computed an unused index
        # variable and wrapped the loop in a pointless try/finally).
        column_headings.remove(col_name)
        column_headings.insert(0, col_name)
    return df.reindex(columns=column_headings)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.