signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_permissions(FunctionName, Qualifier=None, region=None, key=None, keyid=None, profile=None):
    '''Get resource permissions for the given lambda function.

    Returns a dictionary of permissions keyed by statement ID, or
    ``{'permissions': None}`` when the function has no policy.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_lamba.get_permissions my_function
    '''
    try:
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        kwargs = {}
        if Qualifier is not None:
            kwargs['Qualifier'] = Qualifier
        # The get_policy call is not symmetric with add/remove_permissions,
        # so massage its output into that shape for better ease of use.
        policy = conn.get_policy(FunctionName=FunctionName, **kwargs)
        policy = policy.get('Policy', {})
        if isinstance(policy, six.string_types):
            policy = salt.utils.json.loads(policy)
        if policy is None:
            policy = {}
        permissions = {}
        for stmt in policy.get('Statement', []):
            cond = stmt.get('Condition', {})
            principal = stmt.get('Principal', {})
            if 'AWS' in principal:
                # Pull the account id (fifth colon-separated field) out of the ARN.
                principal = principal['AWS'].split(':')[4]
            else:
                principal = principal.get('Service')
            perm = {'Action': stmt.get('Action'), 'Principal': principal}
            if 'ArnLike' in cond:
                perm['SourceArn'] = cond['ArnLike'].get('AWS:SourceArn')
            if 'StringEquals' in cond:
                perm['SourceAccount'] = cond['StringEquals'].get('AWS:SourceAccount')
            permissions[stmt.get('Sid')] = perm
        return {'permissions': permissions}
    except ClientError as e:
        err = __utils__['boto3.get_error'](e)
        if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException':
            return {'permissions': None}
        return {'permissions': None, 'error': err}
|
def t_ARTICLEHEADER ( self , token ) : # \ xef \ xbc \ x9a is the " fullwidth colon " used in Japanese for instance
ur'\ # \ # \ s + < article - ( ? P < number > [ A - Z0-9 ] + ) > < ( ? P < newtag > [ a - zA - Z0-9 - ] + ) > < ( ? P < oldtag > [ a - zA - Z0-9 - ] + ) > [ ] * ( ? P < name > [ ^ \ < ] + ? ) ( ? P < sep > : \ s | \ xef \ xbc \ x9a ) ( ? P < title > [ ^ < \ n ] + ) \ n'
|
number = token . lexer . lexmatch . group ( "number" ) . decode ( "utf8" )
newtag = token . lexer . lexmatch . group ( "newtag" ) . decode ( "utf8" )
oldtag = token . lexer . lexmatch . group ( "oldtag" ) . decode ( "utf8" )
name = token . lexer . lexmatch . group ( "name" ) . decode ( "utf8" )
sep = token . lexer . lexmatch . group ( "sep" ) . decode ( "utf8" )
title = token . lexer . lexmatch . group ( "title" ) . decode ( "utf8" )
token . value = ( number , newtag , oldtag , name , title , sep )
token . lexer . lineno += 1
return token
|
def update_ports(self, ports, id_or_uri, timeout=-1):
    """Updates the interconnect ports.

    Args:
        ports (list): Ports to update.
        id_or_uri: Can be either the interconnect id or the interconnect uri.
        timeout: Timeout in seconds. Wait for task completion by default. The
            timeout does not abort the operation in OneView; it just stops
            waiting for its completion.

    Returns:
        dict: The interconnect.
    """
    # Every port payload must carry the 'port' type; fill it in if missing.
    payload = merge_default_values(ports, {'type': 'port'})
    uri = self._client.build_uri(id_or_uri) + "/update-ports"
    return self._client.update(payload, uri, timeout)
|
def get_themes_directory(theme_name=None, png=False):
    """Return an absolute path to the /themes directory.

    If ``theme_name`` is None or is found inside the themes directory, the
    themes directory itself is returned; otherwise fall back to the image
    directory ("png" or "gif") next to it.
    """
    dir_themes = os.path.join(get_file_directory(), "themes")
    if theme_name is None:
        return dir_themes
    if theme_name in os.listdir(dir_themes):
        return dir_themes
    # Renamed from ``dir`` to avoid shadowing the builtin of the same name.
    image_dir = "png" if png is True else "gif"
    return os.path.join(get_file_directory(), image_dir)
|
def list_():
    '''List installed Perl modules, and the version installed

    CLI Example:

    .. code-block:: bash

        salt '*' cpan.list
    '''
    ret = {}
    cmd = 'cpan -l'
    out = __salt__['cmd.run'](cmd).splitlines()
    for line in out:
        comps = line.split()
        # Skip blank or malformed lines; indexing them blindly previously
        # raised IndexError when cpan emitted anything but "<name> <version>".
        if len(comps) < 2:
            continue
        ret[comps[0]] = comps[1]
    return ret
|
def extendable(network, args, line_max):
    """Function that sets selected components extendable

    'network' for all lines, links and transformers
    'german_network' for all lines, links and transformers located in Germany
    'foreign_network' for all foreign lines, links and transformers
    'transformers' for all transformers
    'storages' for extendable storages
    'overlay_network' for lines, links and trafos in extension scenerio(s)

    Parameters
    ----------
    network : :class:`pypsa.Network
        Overall container of PyPSA
    args : dict
        Arguments set in appl.py
    line_max :
        Upper expansion bound as a multiple of existing capacity, or None
        for unbounded expansion.

    Returns
    -------
    network : :class:`pypsa.Network
        Overall container of PyPSA
    """
    if 'network' in args['extendable']:
        # Make every line extendable, bounded below by today's capacity.
        network.lines.s_nom_extendable = True
        network.lines.s_nom_min = network.lines.s_nom
        if not line_max == None:
            network.lines.s_nom_max = line_max * network.lines.s_nom
        else:
            network.lines.s_nom_max = float("inf")
        if not network.transformers.empty:
            network.transformers.s_nom_extendable = True
            network.transformers.s_nom_min = network.transformers.s_nom
            if not line_max == None:
                network.transformers.s_nom_max = line_max * network.transformers.s_nom
            else:
                network.transformers.s_nom_max = float("inf")
        if not network.links.empty:
            network.links.p_nom_extendable = True
            network.links.p_nom_min = network.links.p_nom
            # NOTE(review): this assignment is immediately superseded by the
            # if/else below — presumably redundant; confirm before removing.
            network.links.p_nom_max = float("inf")
            if not line_max == None:
                network.links.p_nom_max = line_max * network.links.p_nom
            else:
                network.links.p_nom_max = float("inf")
        network = set_line_costs(network, args)
        network = set_trafo_costs(network, args)
    if 'german_network' in args['extendable']:
        # German buses = all buses NOT in the foreign-bus index.
        buses = network.buses[~network.buses.index.isin(buses_by_country(network).index)]
        # Lines with both endpoints on German buses become extendable.
        network.lines.loc[(network.lines.bus0.isin(buses.index)) & (network.lines.bus1.isin(buses.index)), 's_nom_extendable'] = True
        network.lines.loc[(network.lines.bus0.isin(buses.index)) & (network.lines.bus1.isin(buses.index)), 's_nom_min'] = network.lines.s_nom
        # NOTE(review): overwritten by the if/else just below — looks redundant.
        network.lines.loc[(network.lines.bus0.isin(buses.index)) & (network.lines.bus1.isin(buses.index)), 's_nom_max'] = float("inf")
        if not line_max == None:
            network.lines.loc[(network.lines.bus0.isin(buses.index)) & (network.lines.bus1.isin(buses.index)), 's_nom_max'] = line_max * network.lines.s_nom
        else:
            network.lines.loc[(network.lines.bus0.isin(buses.index)) & (network.lines.bus1.isin(buses.index)), 's_nom_max'] = float("inf")
        if not network.transformers.empty:
            # Transformers are selected by bus0 only.
            network.transformers.loc[network.transformers.bus0.isin(buses.index), 's_nom_extendable'] = True
            network.transformers.loc[network.transformers.bus0.isin(buses.index), 's_nom_min'] = network.transformers.s_nom
            if not line_max == None:
                network.transformers.loc[network.transformers.bus0.isin(buses.index), 's_nom_max'] = line_max * network.transformers.s_nom
            else:
                network.transformers.loc[network.transformers.bus0.isin(buses.index), 's_nom_max'] = float("inf")
        if not network.links.empty:
            network.links.loc[(network.links.bus0.isin(buses.index)) & (network.links.bus1.isin(buses.index)), 'p_nom_extendable'] = True
            network.links.loc[(network.links.bus0.isin(buses.index)) & (network.links.bus1.isin(buses.index)), 'p_nom_min'] = network.links.p_nom
            if not line_max == None:
                network.links.loc[(network.links.bus0.isin(buses.index)) & (network.links.bus1.isin(buses.index)), 'p_nom_max'] = line_max * network.links.p_nom
            else:
                network.links.loc[(network.links.bus0.isin(buses.index)) & (network.links.bus1.isin(buses.index)), 'p_nom_max'] = float("inf")
        network = set_line_costs(network, args)
        network = set_trafo_costs(network, args)
    if 'foreign_network' in args['extendable']:
        # Foreign buses = buses present in the by-country index.
        buses = network.buses[network.buses.index.isin(buses_by_country(network).index)]
        # Here components touching a foreign bus at EITHER endpoint qualify.
        network.lines.loc[network.lines.bus0.isin(buses.index) | network.lines.bus1.isin(buses.index), 's_nom_extendable'] = True
        network.lines.loc[network.lines.bus0.isin(buses.index) | network.lines.bus1.isin(buses.index), 's_nom_min'] = network.lines.s_nom
        if not line_max == None:
            network.lines.loc[network.lines.bus0.isin(buses.index) | network.lines.bus1.isin(buses.index), 's_nom_max'] = line_max * network.lines.s_nom
        else:
            network.lines.loc[network.lines.bus0.isin(buses.index) | network.lines.bus1.isin(buses.index), 's_nom_max'] = float("inf")
        if not network.transformers.empty:
            network.transformers.loc[network.transformers.bus0.isin(buses.index) | network.transformers.bus1.isin(buses.index), 's_nom_extendable'] = True
            network.transformers.loc[network.transformers.bus0.isin(buses.index) | network.transformers.bus1.isin(buses.index), 's_nom_min'] = network.transformers.s_nom
            if not line_max == None:
                network.transformers.loc[network.transformers.bus0.isin(buses.index) | network.transformers.bus1.isin(buses.index), 's_nom_max'] = line_max * network.transformers.s_nom
            else:
                network.transformers.loc[network.transformers.bus0.isin(buses.index) | network.transformers.bus1.isin(buses.index), 's_nom_max'] = float("inf")
        if not network.links.empty:
            network.links.loc[(network.links.bus0.isin(buses.index)) | (network.links.bus1.isin(buses.index)), 'p_nom_extendable'] = True
            network.links.loc[(network.links.bus0.isin(buses.index)) | (network.links.bus1.isin(buses.index)), 'p_nom_min'] = network.links.p_nom
            if not line_max == None:
                network.links.loc[(network.links.bus0.isin(buses.index)) | (network.links.bus1.isin(buses.index)), 'p_nom_max'] = line_max * network.links.p_nom
            else:
                network.links.loc[(network.links.bus0.isin(buses.index)) | (network.links.bus1.isin(buses.index)), 'p_nom_max'] = float("inf")
        network = set_line_costs(network, args)
        network = set_trafo_costs(network, args)
    if 'transformers' in args['extendable']:
        network.transformers.s_nom_extendable = True
        network.transformers.s_nom_min = network.transformers.s_nom
        network.transformers.s_nom_max = float("inf")
        # NOTE(review): other branches call set_trafo_costs(network, args) —
        # confirm this one-argument call is intentional.
        network = set_trafo_costs(network)
    if 'storages' in args['extendable'] or 'storage' in args['extendable']:
        # NOTE(review): Series.any() on an object dtype returned a value in
        # old pandas; on modern pandas it returns a bool and this comparison
        # is always False — confirm against the pinned pandas version.
        if network.storage_units.carrier[network.storage_units.carrier == 'extendable_storage'].any() == 'extendable_storage':
            network.storage_units.loc[network.storage_units.carrier == 'extendable_storage', 'p_nom_extendable'] = True
    if 'generators' in args['extendable']:
        network.generators.p_nom_extendable = True
        network.generators.p_nom_min = network.generators.p_nom
        network.generators.p_nom_max = float("inf")
    if 'foreign_storage' in args['extendable']:
        # Make battery/hydrogen storage at non-German buses extendable and
        # copy cost parameters from the matching extendable_storage units
        # (6h ~ battery, 168h ~ hydrogen).
        network.storage_units.p_nom_extendable[(network.storage_units.bus.isin(network.buses.index[network.buses.country_code != 'DE'])) & (network.storage_units.carrier.isin(['battery_storage', 'hydrogen_storage']))] = True
        network.storage_units.loc[network.storage_units.p_nom_max.isnull(), 'p_nom_max'] = network.storage_units.p_nom
        network.storage_units.loc[(network.storage_units.carrier == 'battery_storage'), 'capital_cost'] = network.storage_units.loc[(network.storage_units.carrier == 'extendable_storage') & (network.storage_units.max_hours == 6), 'capital_cost'].max()
        network.storage_units.loc[(network.storage_units.carrier == 'hydrogen_storage'), 'capital_cost'] = network.storage_units.loc[(network.storage_units.carrier == 'extendable_storage') & (network.storage_units.max_hours == 168), 'capital_cost'].max()
        network.storage_units.loc[(network.storage_units.carrier == 'battery_storage'), 'marginal_cost'] = network.storage_units.loc[(network.storage_units.carrier == 'extendable_storage') & (network.storage_units.max_hours == 6), 'marginal_cost'].max()
        network.storage_units.loc[(network.storage_units.carrier == 'hydrogen_storage'), 'marginal_cost'] = network.storage_units.loc[(network.storage_units.carrier == 'extendable_storage') & (network.storage_units.max_hours == 168), 'marginal_cost'].max()
    # Extension settings for extension-NEP 2035 scenarios
    if 'NEP Zubaunetz' in args['extendable']:
        for i in range(len(args['scn_extension'])):
            # Non-EnLAG projects of each extension scenario become extendable.
            network.lines.loc[(network.lines.project != 'EnLAG') & (network.lines.scn_name == 'extension_' + args['scn_extension'][i]), 's_nom_extendable'] = True
            network.transformers.loc[(network.transformers.project != 'EnLAG') & (network.transformers.scn_name == ('extension_' + args['scn_extension'][i])), 's_nom_extendable'] = True
            network.links.loc[network.links.scn_name == ('extension_' + args['scn_extension'][i]), 'p_nom_extendable'] = True
    if 'overlay_network' in args['extendable']:
        for i in range(len(args['scn_extension'])):
            network.lines.loc[network.lines.scn_name == ('extension_' + args['scn_extension'][i]), 's_nom_extendable'] = True
            network.lines.loc[network.lines.scn_name == ('extension_' + args['scn_extension'][i]), 's_nom_max'] = network.lines.s_nom[network.lines.scn_name == ('extension_' + args['scn_extension'][i])]
            network.links.loc[network.links.scn_name == ('extension_' + args['scn_extension'][i]), 'p_nom_extendable'] = True
            network.transformers.loc[network.transformers.scn_name == ('extension_' + args['scn_extension'][i]), 's_nom_extendable'] = True
            # Undo the branch capacity factor on extension-line capital costs.
            network.lines.loc[network.lines.scn_name == ('extension_' + args['scn_extension'][i]), 'capital_cost'] = network.lines.capital_cost / args['branch_capacity_factor']['eHV']
    if 'overlay_lines' in args['extendable']:
        for i in range(len(args['scn_extension'])):
            network.lines.loc[network.lines.scn_name == ('extension_' + args['scn_extension'][i]), 's_nom_extendable'] = True
            network.links.loc[network.links.scn_name == ('extension_' + args['scn_extension'][i]), 'p_nom_extendable'] = True
            # Flat cost adder for overlay lines.
            network.lines.loc[network.lines.scn_name == ('extension_' + args['scn_extension'][i]), 'capital_cost'] = network.lines.capital_cost + (2 * 14166)
    # Pin non-extendable components to their present capacity on both bounds.
    network.lines.s_nom_min[network.lines.s_nom_extendable == False] = network.lines.s_nom
    network.transformers.s_nom_min[network.transformers.s_nom_extendable == False] = network.transformers.s_nom
    network.lines.s_nom_max[network.lines.s_nom_extendable == False] = network.lines.s_nom
    network.transformers.s_nom_max[network.transformers.s_nom_extendable == False] = network.transformers.s_nom
    return network
|
def create_response(self, data=None):
    """Create a response frame based on this frame.

    :param data: Data section of response as bytearray. If None, request
        data section is kept.
    :return: ModbusTCPFrame instance that represents a response
    """
    response = deepcopy(self)
    if data is not None:
        response.data = data
    # Recompute the length field for the (possibly replaced) data section.
    response.length = len(response.data) + 2
    return response
|
def _write_json ( obj , path ) : # type : ( object , str ) - > None
"""Writes a serializeable object as a JSON file"""
|
with open ( path , 'w' ) as f :
json . dump ( obj , f )
|
def chat(self, message):
    """Send chat message as a broadcast.

    Empty/None messages are ignored (returns None without acting).
    """
    if not message:
        return None
    broadcast = sc_pb.ActionChat(channel=sc_pb.ActionChat.Broadcast, message=message)
    return self.act(sc_pb.Action(action_chat=broadcast))
|
def save_token(self, access_token):
    """Stores the access token and additional data in redis.

    See :class:`oauth2.store.AccessTokenStore`.

    The token data is written under three keys: the token itself, a
    (client, grant, user) unique key, and — when present — the refresh token.
    """
    payload = access_token.__dict__
    self.write(access_token.token, payload)
    unique_key = self._unique_token_key(
        access_token.client_id, access_token.grant_type, access_token.user_id)
    self.write(unique_key, payload)
    if access_token.refresh_token is not None:
        self.write(access_token.refresh_token, payload)
|
def start_debugging():
    """Start a debugging session after catching an exception.

    This prints the traceback and starts an ipdb session in the frame of the
    error.
    """
    exc_type, exc_value, exc_tb = sys.exc_info()
    # Exceptions annotated for re-raising propagate instead of being debugged.
    if hasattr(exc_value, '_ipdbugger_let_raise'):
        raise_(*sys.exc_info())
    print()
    for trace_line in traceback.format_exception(exc_type, exc_value, exc_tb):
        print(colored(trace_line, 'red'), end=' ')
    # Frame in which the error occurred.
    test_frame = sys._getframe(-1).f_back
    from ipdb.__main__ import wrap_sys_excepthook
    wrap_sys_excepthook()
    IPDBugger(exc_info=sys.exc_info()).set_trace(test_frame)
|
def y_subset(y, query=None, aux=None, subset=None, dropna=False, outcome='true',
             k=None, p=None, ascending=False, score='score', p_of='notnull'):
    """Subset a model "y" dataframe.

    Args:
        query: operates on y, or aux if present
        subset: takes a dataframe or index thereof and subsets to that
        dropna: means drop missing outcomes
        k / p: return top k (count) or p (proportion) if specified
        p_of: specifies what the proportion is relative to
            'notnull' means proportion is relative to labeled count
            'true' means proportion is relative to positive count
            'all' means proportion is relative to total count

    Raises:
        ValueError: if both ``k`` and ``p`` are given, or ``p_of`` is invalid.
    """
    if query is not None:
        if aux is None:
            y = y.query(query)
        else:
            # .ix was deprecated and removed from pandas; .loc performs the
            # equivalent label-based lookup here.
            s = aux.loc[y.index]
            if len(s) != len(y):
                logging.warning('y not a subset of aux')
            y = y.loc[s.query(query).index]
    if subset is not None:
        if hasattr(subset, 'index'):
            subset = subset.index
        y = y.loc[y.index.intersection(subset)]
    if dropna:
        y = y.dropna(subset=[outcome])
    if k is not None and p is not None:
        raise ValueError("Cannot specify both k and p")
    if p is not None:
        # Translate the proportion into a row count.
        if p_of == 'notnull':
            k = int(p * y[outcome].notnull().sum())
        elif p_of == 'true':
            k = int(p * y[outcome].sum())
        elif p_of == 'all':
            k = int(p * len(y))
        else:
            raise ValueError('Invalid value for p_of: %s' % p_of)
    if k is not None:
        y = y.sort_values(score, ascending=ascending).head(k)
    return y
|
def replace_route(route_table_id=None, destination_cidr_block=None, route_table_name=None, gateway_id=None, instance_id=None, interface_id=None, region=None, key=None, keyid=None, profile=None, vpc_peering_connection_id=None):
    '''Replaces a route.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_vpc.replace_route 'rtb-1f382e7d' '10.0.0.0/16' gateway_id='vgw-a1b2c3'
    '''
    # Exactly one of id/name must identify the route table.
    if not _exactly_one((route_table_name, route_table_id)):
        raise SaltInvocationError('One (but not both) of route_table_id or route_table_name must be provided.')
    if destination_cidr_block is None:
        raise SaltInvocationError('destination_cidr_block is required.')
    try:
        if route_table_name:
            route_table_id = _get_resource_id('route_table', route_table_name, region=region, key=key, keyid=keyid, profile=profile)
            if not route_table_id:
                return {'replaced': False, 'error': {'message': 'route table {0} does not exist.'.format(route_table_name)}}
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        replaced = conn.replace_route(route_table_id, destination_cidr_block, gateway_id=gateway_id, instance_id=instance_id, interface_id=interface_id, vpc_peering_connection_id=vpc_peering_connection_id)
        if not replaced:
            log.warning('Route with cidr block %s on route table %s was not replaced', route_table_id, destination_cidr_block)
            return {'replaced': False}
        log.info('Route with cidr block %s on route table %s was replaced', route_table_id, destination_cidr_block)
        return {'replaced': True}
    except BotoServerError as e:
        return {'replaced': False, 'error': __utils__['boto.get_error'](e)}
|
def parse_chinese_morphemes(seq, context=False):
    """Parse a Chinese syllable and return its basic structure.

    Returns ``[I, M, N, C, T]`` (initial, medial, nucleus, coda, tone, with
    '-' for empty slots).  When ``context`` is True, also returns the
    non-empty per-slot context strings.
    """
    # get the tokens
    if isinstance(seq, list):
        tokens = [s for s in seq]
    else:
        tokens = lingpy.ipa2tokens(seq, merge_vowels=False)
    # get the sound classes according to the art-model
    arts = [int(x) for x in lingpy.tokens2class(tokens, _art, cldf=True)]
    # get the pro-string
    prostring = lingpy.prosodic_string(arts)
    # parse the zip of tokens and arts
    I, M, N, C, T = '', '', '', '', ''
    ini = False
    med = False
    nuc = False
    cod = False
    ton = False
    # Pad with sentinels so every position has a predecessor and successor.
    triples = [('?', '?', '?')] + list(zip(tokens, arts, prostring)) + [('?', '?', '?')]
    for i in range(1, len(triples) - 1):
        t, c, p = triples[i]
        _t, _c, _p = triples[i - 1]
        t_, c_, p_ = triples[i + 1]
        # check for initial entry first
        if p == 'A' and _t == '?':
            # a j-sound (or glide) followed by a vowel goes directly to the
            # medial environment
            if t[0] in 'jɥw':
                med = True
                ini, nuc, cod, ton = False, False, False, False
            else:
                ini = True
                # BUG FIX: the original assigned to a stray name ``doc``
                # instead of resetting ``cod`` here.
                med, nuc, cod, ton = False, False, False, False
        # check for initial vowel
        elif p == 'X' and _t == '?':
            if t[0] in 'iuy' and c_ == '7':
                med = True
                ini, nuc, cod, ton = False, False, False, False
            else:
                nuc = True
                ini, med, cod, ton = False, False, False, False
        # check for medial after initial
        elif p == 'C':
            med = True
            ini, nuc, cod, ton = False, False, False, False
        # check for vowel medial
        elif p == 'X' and p_ == 'Y':
            # if we have a medial vowel, we classify it as medial
            if t in 'iyu':
                med = True
                ini, nuc, cod, ton = False, False, False, False
            else:
                nuc = True
                ini, med, cod, ton = False, False, False, False
        # check for vowel without medial
        elif p == 'X' or p == 'Y':
            if p_ in 'LTY' or p_ == '?':
                nuc = True
                ini, med, cod, ton = False, False, False, False
            elif p == 'Y':
                nuc = True
                ini, med, cod, ton = 4 * [False]
            else:
                cod = True
                ini, med, nuc, ton = 4 * [False]
        # check for consonant
        elif p == 'L':
            cod = True
            ini, med, nuc, ton = 4 * [False]
        # check for tone
        elif p == 'T':
            ton = True
            ini, med, nuc, cod = 4 * [False]
        if ini:
            I += t
        elif med:
            M += t
        elif nuc:
            N += t
        elif cod:
            C += t
        else:
            T += t
    # bad conversion for output, but makes what it is supposed to do
    out = [I, M, N, C, T]
    tf = lambda x: x if x else '-'
    out = [tf(x) for x in out]
    # transform tones to normal letters
    # BUG FIX: the digit string previously had 19 characters for 20 tone
    # marks, so zip() silently dropped the mapping for '₀'.
    tones = dict(zip('¹²³⁴⁵⁶⁷⁸⁹⁰₁₂₃₄₅₆₇₈₉₀', '12345678901234567890'))
    # now, if context is wanted, we'll yield that
    ic = '1' if [x for x in I if x in 'bdgmnŋȵɳɴ'] else '0'
    mc = '1' if [m for m in M + N if m in 'ijyɥ'] else '0'
    # BUG FIX: '' in 'ptkʔ' is True, so an empty coda was previously flagged
    # as a checked syllable; require a non-empty coda.
    cc = '1' if C and C in 'ptkʔ' else '0'
    tc = ''.join([tones.get(x, x) for x in T])
    IC = '/'.join(['I', ic, mc, cc, tc]) if I else ''
    MC = '/'.join(['M', ic, mc, cc, tc]) if M else ''
    NC = '/'.join(['N', ic, mc, cc, tc]) if N else ''
    CC = '/'.join(['C', ic, mc, cc, tc]) if C else ''
    TC = '/'.join(['T', ic, mc, cc, tc]) if T else ''
    if context:
        return out, [x for x in [IC, MC, NC, CC, TC] if x]
    return out
|
def evaluate_inline_tail(self, groups):
    """Evaluate inline comments at the tail of source code.

    Strips the two-character comment marker and any escaped line
    continuations, then records [text, line number, encoding].
    """
    if not self.lines:
        return
    comment_text = groups['line'][2:].replace('\\\n', '')
    self.line_comments.append([comment_text, self.line_num, self.current_encoding])
|
def create(self, fields):
    """Create the object only once.
    So, you need loop to usage.

    :param `fields`: dictionary of field name -> value; dict values with a
        ``'type'`` of ``'fk'``/``'m2m'`` are resolved via ``fake_fk`` /
        ``fake_m2m``.
    :return: the created model instance.
    """
    # Clean the fields, resolving any `ForeignKey` placeholders first.
    cleaned_fields = {}
    for field_name, value in fields.items():
        if isinstance(value, dict):
            try:
                if value['type'] == 'fk':
                    cleaned_fields[field_name] = self.fake_fk(value['field_name'])
            except Exception:
                # Narrowed from a bare ``except:``; best-effort behavior kept.
                pass
        else:
            cleaned_fields[field_name] = value
    # Creating the object from dictionary fields.
    model_class = self.model_class()
    obj = model_class.objects.create(**cleaned_fields)
    # The `ManyToManyField` needs a saved object, so handle it afterwards.
    for field_name, value in fields.items():
        if isinstance(value, dict):
            try:
                if value['type'] == 'm2m':
                    self.fake_m2m(obj, value['field_name'])
            except Exception:
                pass
    try:
        obj.save_m2m()
    except Exception:
        obj.save()
    # Removed the outer ``try/except Exception as e: raise e`` wrapper — it
    # was a no-op that only mangled tracebacks.
    return obj
|
def PhenomModel(self, r):
    """Fit to field map.

    A phenomenological fit by Ryan Bayes (Glasgow) to a field map generated
    by Bob Wands (FNAL). It assumes a 1 cm plate. This is dated January
    30th, 2012. Not defined for r <= 0.

    :raises ValueError: if ``r`` is not strictly positive.
    """
    if r <= 0:
        # Give the caller a diagnostic instead of a bare ValueError.
        raise ValueError('PhenomModel is not defined for r <= 0 (got %r)' % r)
    field = self.B0 + self.B1 * G4.m / r + self.B2 * math.exp(-1 * self.H * r / G4.m)
    return field
|
def _calibrate_ir(radiance, coefs):
    """Convert IR radiance to brightness temperature.

    Reference: [IR]

    Args:
        radiance: Radiance [mW m-2 cm-1 sr-1]
        coefs: Dictionary of calibration coefficients. Keys:
            n: The channel's central wavenumber [cm-1]
            a: Offset [K]
            b: Slope [1]
            btmin: Minimum brightness temperature threshold [K]
            btmax: Maximum brightness temperature threshold [K]

    Returns:
        Brightness temperature [K]
    """
    logger.debug('Calibrating to brightness temperature')
    # Inverse Planck formula; non-positive radiances are masked out first.
    n = coefs['n']
    masked = radiance.where(radiance > 0)
    bteff = C2 * n / xu.log(1 + C1 * n ** 3 / masked)
    bt = xr.DataArray(bteff * coefs['b'] + coefs['a'])
    # Apply the BT validity thresholds.
    in_range = xu.logical_and(bt >= coefs['btmin'], bt <= coefs['btmax'])
    return bt.where(in_range)
|
def setDeviceID(self, value, device=DEFAULT_DEVICE_ID, message=True):
    """Set the hardware device number.

    This is only needed if more than one device is on the same serial bus.

    :Parameters:
      value : `int`
        The device ID to set, in the range of 0-127.

    :Keywords:
      device : `int`
        The integer number of the hardware device ID; only used with the
        Pololu Protocol. Defaults to the hardware's default value.
      message : `bool`
        If `True`, a text message is returned; if `False`, the integer
        stored in the Qik is returned.

    :Returns:
      A text message or an int, per the `message` keyword. If `value` and
      `device` are the same, `OK` or `0` is returned depending on `message`.

    :Exceptions:
      * `SerialException`
        IO error indicating there was a problem reading from the serial
        connection.
    """
    # Thin wrapper — all work happens in the protocol-level helper.
    return self._setDeviceID(value, device, message)
|
def propagate(p0, angle, d, deg=True, bearing=False, r=r_earth_mean):
    """Given an initial point and angle, move distance d along the surface.

    Parameters
    ----------
    p0 : point-like (or array of point-like) [lon, lat] objects
    angle : float (or array of float)
        bearing. Note that by default, 0 degrees is due East increasing
        clockwise so that 90 degrees is due North. See the bearing flag
        to change the meaning of this angle.
    d : float (or array of float)
        distance to move. The units of d should be consistent with input r.
    deg : bool, optional (default True)
        Whether both p0 and angle are specified in degrees. The output
        points will also match the value of this flag.
    bearing : bool, optional (default False)
        Indicates whether to interpret the input angle as the classical
        definition of bearing.
    r : float, optional (default r_earth_mean)
        radius of the sphere.

    Reference
    ---------
    http://www.movable-type.co.uk/scripts/latlong.html - Destination
    Note: Spherical earth model. By default uses radius of 6371.0 km.
    """
    single, (p0, angle, d) = _to_arrays((p0, 2), (angle, 1), (d, 1))
    if deg:
        p0 = np.radians(p0)
        angle = np.radians(angle)
    if not bearing:
        # Convert math-convention angle (0 = East, CCW) into a bearing.
        angle = np.pi / 2.0 - angle
    lon0, lat0 = p0[:, 0], p0[:, 1]
    angular_dist = d / r
    lat1 = arcsin(sin(lat0) * cos(angular_dist) + cos(lat0) * sin(angular_dist) * cos(angle))
    num = sin(angle) * sin(angular_dist) * cos(lat0)
    den = cos(angular_dist) - sin(lat0) * sin(lat1)
    lon1 = lon0 + arctan2(num, den)
    p1 = np.column_stack([lon1, lat1])
    if deg:
        p1 = np.degrees(p1)
    return p1[0] if single else p1
|
def compare_and_set(self, expect, update):
    '''Atomically sets the value to `update` if the current value is equal
    to `expect`.

    :param expect: The expected current value.
    :param update: The value to set if and only if `expect` equals the
        current value.
    :return: True if the swap happened, False otherwise.
    '''
    with self._reference.get_lock():
        if self._reference.value != expect:
            return False
        self._reference.value = update
        return True
|
def graph_from_dot_file(path, encoding=None):
    """Load graphs from DOT file at `path`.

    @param path: to DOT file
    @param encoding: as passed to `io.open`.
        For example, `'utf-8'`.

    @return: Graphs that result from parsing.
    @rtype: `list` of `pydot.Dot`
    """
    with io.open(path, 'rt', encoding=encoding) as f:
        dot_data = f.read()
    if not PY3:
        # Python 2: normalize to unicode before parsing.
        dot_data = unicode(dot_data)
    return graph_from_dot_data(dot_data)
|
def GET_blockchain_num_subdomains(self, path_info, blockchain_name):
    """Handle GET /blockchains/:blockchainID/subdomains_count

    Takes `all=true` to include expired names.
    Reply with the number of names on this blockchain.
    """
    if blockchain_name != 'bitcoin':
        # Only the bitcoin chain is supported.
        self._reply_json({'error': 'Unsupported blockchain'}, status_code=404)
        return
    blockstackd_url = get_blockstackd_url()
    num_names = blockstackd_client.get_num_subdomains(hostport=blockstackd_url)
    if json_is_error(num_names):
        # 406 for exceptions, 404 for plain error replies.
        status_code = 406 if json_is_exception(num_names) else 404
        self._reply_json({'error': num_names['error']}, status_code=status_code)
        return
    self._reply_json({'names_count': num_names})
|
def watcher(self) -> Watcher:
    """Gives an access to action's watcher.

    The watcher is created lazily on first access and cached.

    :return: Action's watcher instance.
    """
    try:
        return self._watcher
    except AttributeError:
        self._watcher = Watcher()
        return self._watcher
|
def purge_old_logs(delete_before_days=7):
    """Purges old logs from the database table.

    Deletes every Log row created on or before ``delete_before_days`` days
    ago and returns the queryset ``delete()`` result.
    """
    cutoff = timezone.now() - timedelta(days=delete_before_days)
    return Log.objects.filter(created_on__lte=cutoff).delete()
|
def _setattr_url_map(self):
    '''Set an attribute on the local instance for each key/val in url_map.

    CherryPy uses class attributes to resolve URLs.
    '''
    # With sessions disabled, session-dependent endpoints are not exposed.
    if self.apiopts.get('enable_sessions', True) is False:
        url_blacklist = ['login', 'logout', 'minions', 'jobs']
    else:
        url_blacklist = []
    for url, cls in six.iteritems(self.url_map):
        if url in url_blacklist:
            continue
        setattr(self, url, cls())
|
def thumbs_up_songs ( self , * , library = True , store = True ) :
    """Get a listing of 'Thumbs Up' songs.

    Parameters:
        library (bool, Optional): Include 'Thumbs Up' songs from the library.
            Default: True
        store (bool, Optional): Include 'Thumbs Up' songs from the store.
            Default: True

    Returns:
        list: Dicts of 'Thumbs Up' songs.
    """

    thumbs_up_songs = [ ]
    # Library songs rated '5' are the locally thumbed-up tracks.
    if library is True :
        thumbs_up_songs . extend ( song for song in self . songs ( ) if song . get ( 'rating' , '0' ) == '5' )
    # Store thumbs-up songs come from the EphemeralTop endpoint.
    if store is True :
        response = self . _call ( mc_calls . EphemeralTop )
        thumbs_up_songs . extend ( response . body . get ( 'data' , { } ) . get ( 'items' , [ ] ) )
    return thumbs_up_songs
|
def setRoute ( self , vehID , edgeList ) :
    """setRoute(string, list) -> None

    Change the route of a vehicle to the given list of edges.  The first
    edge in the list must be the edge the vehicle is currently on.

    Example:
        setRoute('1', ['1', '2', '4', '6', '7'])
    sets the route for vehicle 1 to edges 1-2-4-6-7.
    """
    # Accept a single edge ID string as a convenience.
    if isinstance ( edgeList , str ) :
        edgeList = [ edgeList ]
    # Payload size: 1 byte type + 4 byte count + per edge (4 byte length + chars).
    payload_len = 1 + 4 + sum ( len ( edge ) for edge in edgeList ) + 4 * len ( edgeList )
    self . _connection . _beginMessage ( tc . CMD_SET_VEHICLE_VARIABLE , tc . VAR_ROUTE , vehID , payload_len )
    self . _connection . _packStringList ( edgeList )
    self . _connection . _sendExact ( )
|
def _exit_missing_dir ( kind , path ) :
    """Print the LIVVkit 'directory does not exist' banner for ``kind`` and exit(1)."""
    print ( "" )
    print ( "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" )
    print ( " UH OH!" )
    print ( "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" )
    print ( " Your " + kind + " directory does not exist; please check" )
    print ( " the path:" )
    print ( "\n" + path + "\n\n" )
    sys . exit ( 1 )


def init ( options ) :
    """Initialize some defaults.

    Configures module-level ``livvkit`` state (output directories,
    verify/validate flags, bundle config paths and modules) from the
    parsed command-line ``options``.  Exits with status 1 when a required
    directory is missing or when no tests were requested.

    :param options: parsed command-line options (argparse namespace)
    :return: ``options``, unchanged
    """
    # Set matplotlib's backend so LIVVkit can plot to files.
    import matplotlib
    matplotlib . use ( 'agg' )

    livvkit . output_dir = os . path . abspath ( options . out_dir )
    livvkit . index_dir = livvkit . output_dir
    livvkit . verify = options . verify is not None
    livvkit . validate = options . validate is not None
    livvkit . publish = options . publish

    # Get a list of bundles that provide model specific implementations
    available_bundles = [ mod for imp , mod , ispkg in pkgutil . iter_modules ( bundles . __path__ ) ]

    if options . verify is not None :
        livvkit . model_dir = os . path . normpath ( options . verify [ 0 ] )
        livvkit . bench_dir = os . path . normpath ( options . verify [ 1 ] )
        if not os . path . isdir ( livvkit . model_dir ) :
            _exit_missing_dir ( "comparison" , livvkit . model_dir )
        if not os . path . isdir ( livvkit . bench_dir ) :
            _exit_missing_dir ( "benchmark" , livvkit . bench_dir )

        livvkit . model_bundle = os . path . basename ( livvkit . model_dir )
        livvkit . bench_bundle = os . path . basename ( livvkit . bench_dir )
        if livvkit . model_bundle in available_bundles :
            livvkit . numerics_model_config = os . path . join ( livvkit . bundle_dir , livvkit . model_bundle , "numerics.json" )
            livvkit . numerics_model_module = importlib . import_module ( "." . join ( [ "livvkit.bundles" , livvkit . model_bundle , "numerics" ] ) )
            livvkit . verification_model_config = os . path . join ( livvkit . bundle_dir , livvkit . model_bundle , "verification.json" )
            livvkit . verification_model_module = importlib . import_module ( "." . join ( [ "livvkit.bundles" , livvkit . model_bundle , "verification" ] ) )
            livvkit . performance_model_config = os . path . join ( livvkit . bundle_dir , livvkit . model_bundle , "performance.json" )
            # NOTE: This isn't used right now...
            # livvkit.performance_model_module = importlib.import_module(
            #     ".".join(["livvkit.bundles", livvkit.model_bundle, "performance"]))
        else :
            # TODO: Should implement some error checking here...
            livvkit . verify = False

    if options . validate is not None :
        livvkit . validation_model_configs = options . validate

    if not ( livvkit . verify or livvkit . validate ) and not options . serve :
        print ( "" )
        print ( "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" )
        print ( " UH OH!" )
        print ( "----------------------------------------------------------" )
        print ( " No verification or validation tests found/submitted!" )
        print ( "" )
        print ( " Use either one or both of the --verify and" )
        print ( " --validate options to run tests. For more " )
        print ( " information use the --help option, view the README" )
        print ( " or check https://livvkit.github.io/Docs/" )
        print ( "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!" )
        print ( "" )
        sys . exit ( 1 )
    return options
|
def get_components_for_species ( alignment , species ) :
    """Return the alignment component for each species in ``species``, or None.

    :param alignment: alignment whose ``components`` each expose a ``src``
        attribute of the form ``"<species>.<chrom>"``
    :param species: list of species names to look up
    :return: list of components ordered like ``species``, or ``None`` when
        the alignment lacks a component for any requested species
    """
    # If the number of components in the alignment is less than the requested
    # number of species we can immediately fail.
    if len ( alignment . components ) < len ( species ) :
        return None
    # Otherwise, build an index of components by species, then look up.
    index = dict ( [ ( c . src . split ( '.' ) [ 0 ] , c ) for c in alignment . components ] )
    try :
        return [ index [ s ] for s in species ]
    except KeyError :
        # BUGFIX: was a bare `except:` that also hid unrelated errors
        # (KeyboardInterrupt, attribute bugs); only a missing species is
        # an expected "not found" condition.
        return None
|
def draw_path_collection ( self , paths , path_coordinates , path_transforms , offsets , offset_coordinates , offset_order , styles , mplobj = None ) :
    """Draw a collection of paths.

    The paths, offsets, and styles are all iterables, and the number of
    paths drawn is max(len(paths), len(offsets)).  By default this is
    implemented via multiple calls to ``draw_path()``; renderers may
    override for efficiency.  Matplotlib produces path collections for
    scatter plots, histograms, contour plots, and many others.

    Parameters
    ----------
    paths : list
        list of (data, pathcodes) tuples; see ``draw_path()``.
    path_coordinates : string
        coordinate code for the paths: 'data' or 'figure' (pixel).
    path_transforms : array_like
        shape (*, 3, 3); 2D affine transforms (translate/rotate/scale)
        applied to each path.
    offsets : array_like
        shape (N, 2) array of offsets.
    offset_coordinates : string
        coordinate code for the offsets: 'data' or 'figure' (pixel).
    offset_order : string
        "before" or "after": whether the offset is applied before or after
        the path transform (matplotlib "data" -> before, "screen" -> after).
    styles : dictionary
        NOTE(review): documented as "each value is a list of length N",
        but the code below reads scalar-looking ``styles['alpha']`` and
        ``styles['zorder']`` directly — confirm the expected shape.
    mplobj : matplotlib object
        the matplotlib plot element which generated this collection.
    """

    # Applying offsets before the transform is not supported here.
    if offset_order == "before" :
        raise NotImplementedError ( "offset before transform" )
    # _iter_path_collection cycles/zips the iterables so paths, transforms,
    # offsets and style components line up per drawn element.
    for tup in self . _iter_path_collection ( paths , path_transforms , offsets , styles ) :
        ( path , path_transform , offset , ec , lw , fc ) = tup
        vertices , pathcodes = path
        path_transform = transforms . Affine2D ( path_transform )
        # Pre-apply the per-path affine transform to the vertices.
        vertices = path_transform . transform ( vertices )
        # This is a hack: figure-coordinate paths are re-labelled as 'points'.
        if path_coordinates == "figure" :
            path_coordinates = "points"
        style = { "edgecolor" : utils . color_to_hex ( ec ) , "facecolor" : utils . color_to_hex ( fc ) , "edgewidth" : lw , "dasharray" : "10,0" , "alpha" : styles [ 'alpha' ] , "zorder" : styles [ 'zorder' ] }
        self . draw_path ( data = vertices , coordinates = path_coordinates , pathcodes = pathcodes , style = style , offset = offset , offset_coordinates = offset_coordinates , mplobj = mplobj )
|
def _create_initial_state ( self , ip , jumpkind ) :
    """Obtain a SimState object for a specific address.

    Fastpath means the CFG generation will work in an IDA-like way, in
    which it will not try to execute every single statement in the
    emulator, but will just do the decoding job.  This is much faster
    than the old way.

    :param int ip:       The instruction pointer
    :param str jumpkind: The jumpkind upon executing the block
    :return:             The newly-generated state
    :rtype:              SimState
    """

    # Default to a plain fall-through jump when none was supplied.
    jumpkind = "Ijk_Boring" if jumpkind is None else jumpkind
    if self . _initial_state is None :
        # Fresh blank state in fastpath mode, positioned at ip.
        state = self . project . factory . blank_state ( addr = ip , mode = "fastpath" , add_options = self . _state_add_options , remove_options = self . _state_remove_options , )
        self . _initial_state = state
    else : # FIXME: self._initial_state is deprecated. This branch will be removed soon
        # Reuse the shared state: re-arm fastpath mode and reposition it.
        state = self . _initial_state
        state . history . jumpkind = jumpkind
        self . _reset_state_mode ( state , 'fastpath' )
        state . _ip = state . solver . BVV ( ip , self . project . arch . bits )
    if jumpkind is not None :
        state . history . jumpkind = jumpkind
    # THIS IS A HACK FOR MIPS: by convention t9 holds the callee address
    # at function entry, and we assume ip is a function start here.
    if ip is not None and self . project . arch . name in ( 'MIPS32' , 'MIPS64' ) : # We assume this is a function start
        state . regs . t9 = ip
    # TODO there was at one point special logic for the ppc64 table of contents but it seems to have bitrotted
    return state
|
def agg_wt_avg ( mat , min_wt = 0.01 , corr_metric = 'spearman' ) :
    '''Aggregate replicate profiles into a single signature via a weighted average.

    Args:
        mat (pandas df): matrix of replicate profiles; columns are samples,
            rows are features; the columns correspond to the replicates of
            a single perturbagen
        min_wt (float): minimum raw weight used when averaging
        corr_metric (string): "spearman" or "pearson" correlation method

    Returns:
        out_sig (pandas series): weighted average values
        upper_tri_df (pandas df): correlations between each pair of profiles
        raw_weights (pandas series): weights before normalization
        weights (pandas series): weights after normalization
    '''
    assert mat . shape [ 1 ] > 0 , "mat is empty! mat: {}" . format ( mat )

    # A single replicate needs no weighting or correlation bookkeeping.
    if mat . shape [ 1 ] == 1 :
        return mat , None , None , None

    assert corr_metric in [ "spearman" , "pearson" ]
    # Column-wise correlation matrix between replicates.
    corr_mat = mat . corr ( method = corr_metric )
    # Keep only the unique pairings (upper triangle).
    upper_tri_df = get_upper_triangle ( corr_mat )
    # Per-replicate weights derived from the correlation structure.
    raw_weights , weights = calculate_weights ( corr_mat , min_wt )
    # Weighted average across replicates.
    out_sig = ( mat * weights ) . sum ( axis = 1 )
    return out_sig , upper_tri_df , raw_weights , weights
|
def _release_version ( ) :
    '''Return the release version string read from saltpylint/version.py.'''
    version_path = os . path . join ( SETUP_DIRNAME , 'saltpylint' , 'version.py' )
    exec_globals = { }
    exec_locals = { }
    with io . open ( version_path , encoding = 'utf-8' ) as fh_ :
        contents = fh_ . read ( )
    if not isinstance ( contents , str ) :
        # Python 2: exec needs a byte string, not unicode.
        contents = contents . encode ( 'utf-8' )
    exec ( contents , exec_globals , exec_locals )  # pylint: disable=exec-used
    return exec_locals [ '__version__' ]
|
def _is_null_value ( self , value ) :
    """Return True if ``value`` counts as ``null``.

    Null values (``None`` and empty strings) should not be included in
    the Solr ``add`` request at all.
    """
    if value is None :
        return True
    # Empty strings are treated as null; the string base type differs
    # between Python 2 and 3.
    string_types = str if IS_PY3 else basestring  # NOQA: F821
    if isinstance ( value , string_types ) and len ( value ) == 0 :
        return True
    # TODO: This should probably be removed when solved in core Solr level?
    return False
|
def overlay_spectra ( model , dataset ) :
    """Overplot original spectra with their best-fit Cannon spectra.

    For ten randomly chosen test stars, saves a two-panel diagnostic
    figure (``best_fit_spec_Star<i>.png``): the spectrum overlay and a
    measured-vs-fitted flux scatter annotated with the reduced chi-square.

    Parameters
    ----------
    model : model
        best-fit Cannon spectral model
    dataset : Dataset
        original spectra
    """
    best_flux , best_ivar = draw_spectra ( model , dataset )
    coeffs_all , covs , scatters , all_chisqs , pivots , label_vector = model . model
    # Overplot original spectra with best-fit spectra
    print ( "Overplotting spectra for ten random stars" )
    lambdas = dataset . wl
    nstars = best_flux . shape [ 0 ]
    # BUGFIX: randrange's stop is already exclusive, so the old
    # randrange(0, nstars - 1) could never pick the last star.
    pickstars = [ random . randrange ( 0 , nstars ) for _ in range ( 10 ) ]
    for i in pickstars :
        print ( "Star %s" % i )
        ID = dataset . test_ID [ i ]
        # Pixels with zero flux are masked out everywhere below.
        bad = dataset . test_flux [ i , : ] == 0
        lambdas = np . ma . array ( lambdas , mask = bad , dtype = float )
        npix = len ( lambdas . compressed ( ) )
        spec_orig = np . ma . array ( dataset . test_flux [ i , : ] , mask = bad )
        spec_fit = np . ma . array ( best_flux [ i , : ] , mask = bad )
        ivars_orig = np . ma . array ( dataset . test_ivar [ i , : ] , mask = bad )
        ivars_fit = np . ma . array ( best_ivar [ i , : ] , mask = bad )
        red_chisq = np . sum ( all_chisqs [ : , i ] , axis = 0 ) / ( npix - coeffs_all . shape [ 1 ] )
        red_chisq = np . round ( red_chisq , 2 )
        fig , axarr = plt . subplots ( 2 )
        ax1 = axarr [ 0 ]
        im = ax1 . scatter ( lambdas , spec_orig , label = "Orig Spec" , c = 1 / np . sqrt ( ivars_orig ) , s = 10 )
        ax1 . scatter ( lambdas , spec_fit , label = "Cannon Spec" , c = 'r' , s = 10 )
        ax1 . errorbar ( lambdas , spec_fit , yerr = 1 / np . sqrt ( ivars_fit ) , fmt = 'ro' , ms = 1 , alpha = 0.7 )
        ax1 . set_xlabel ( r"Wavelength $\lambda (\AA)$" )
        ax1 . set_ylabel ( "Normalized flux" )
        # BUGFIX: a second, generic set_title("Spectrum Fit") used to
        # overwrite this informative per-star title; keep only this one.
        ax1 . set_title ( "Spectrum Fit: %s" % ID )
        ax1 . set_xlim ( min ( lambdas . compressed ( ) ) - 10 , max ( lambdas . compressed ( ) ) + 10 )
        ax1 . legend ( loc = 'lower center' , fancybox = True , shadow = True )
        ax2 = axarr [ 1 ]
        ax2 . scatter ( spec_orig , spec_fit , c = 1 / np . sqrt ( ivars_orig ) , alpha = 0.7 )
        ax2 . errorbar ( spec_orig , spec_fit , yerr = 1 / np . sqrt ( ivars_fit ) , ecolor = 'k' , fmt = "none" , ms = 1 , alpha = 0.7 )
        # BUGFIX: fig.colorbar() without a mappable raises TypeError;
        # attach the colorbar to the scatter as the commented-out code intended.
        fig . colorbar ( im , ax = ax2 , label = "Uncertainties on the Fluxes from the Original Spectrum" )
        xlims = ax2 . get_xlim ( )
        ylims = ax2 . get_ylim ( )
        # Square 1:1 reference line across the shared data range.
        lims = [ np . min ( [ xlims , ylims ] ) , np . max ( [ xlims , ylims ] ) ]
        ax2 . plot ( lims , lims , 'k-' , alpha = 0.75 )
        textstr = "Red Chi Sq: %s" % red_chisq
        props = dict ( boxstyle = 'round' , facecolor = 'palevioletred' , alpha = 0.5 )
        ax2 . text ( 0.05 , 0.95 , textstr , transform = ax2 . transAxes , fontsize = 14 , verticalalignment = 'top' , bbox = props )
        ax2 . set_xlim ( xlims )
        ax2 . set_ylim ( ylims )
        ax2 . set_xlabel ( "Orig Fluxes" )
        ax2 . set_ylabel ( "Fitted Fluxes" )
        plt . tight_layout ( )
        filename = "best_fit_spec_Star%s.png" % i
        print ( "Saved as %s" % filename )
        fig . savefig ( filename )
        plt . close ( fig )
|
def integer_merge ( number_list ) :
    """Concatenate the decimal digits of the given integers into one integer.

    >>> integer_merge([1, 2, 3])
    123
    >>> integer_merge([4, 5, 6])
    456
    >>> integer_merge([7, 8, 9])
    789
    """
    return int ( '' . join ( map ( str , number_list ) ) )
|
def _set_inspection ( self , v , load = False ) :
    """Setter method for inspection, mapped from YANG variable
    /interface/port_channel/ip/arp/inspection (container).

    If this variable is read-only (config: false) in the source YANG
    file, then _set_inspection is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_inspection() directly.

    :param v: new value; coerced into a YANGDynClass container
    :param load: unused here; conventionally True when loading from config
    :raises ValueError: if ``v`` cannot be coerced to the container type
    """

    # Unwrap values that carry their own YANG coercion type.
    if hasattr ( v , "_utype" ) :
        v = v . _utype ( v )
    try :
        t = YANGDynClass ( v , base = inspection . inspection , is_container = 'container' , presence = False , yang_name = "inspection" , rest_name = "inspection" , parent = self , path_helper = self . _path_helper , extmethods = self . _extmethods , register_paths = True , extensions = { u'tailf-common' : { u'info' : u'Set arp inspection flag' , u'cli-incomplete-no' : None } } , namespace = 'urn:brocade.com:mgmt:brocade-dai' , defining_module = 'brocade-dai' , yang_type = 'container' , is_config = True )
    except ( TypeError , ValueError ) :
        raise ValueError ( { 'error-string' : """inspection must be of a type compatible with container""" , 'defined-type' : "container" , 'generated-type' : """YANGDynClass(base=inspection.inspection, is_container='container', presence=False, yang_name="inspection", rest_name="inspection", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set arp inspection flag', u'cli-incomplete-no': None}}, namespace='urn:brocade.com:mgmt:brocade-dai', defining_module='brocade-dai', yang_type='container', is_config=True)""" , } )
    self . __inspection = t
    # Notify the parent object, if it supports change notification.
    if hasattr ( self , '_set' ) :
        self . _set ( )
|
def get_canonical_correlations ( dataframe , column_types ) :
    '''Compute the canonical correlation coefficient for every distinct column pair.

    Preprocessing notes:
      * rows with missing values (in either paired column) are dropped for
        that pairing only
      * categorical columns are replaced with one-hot encoded columns
      * columns with only one distinct value (after dropping missing
        values) are skipped, contributing a coefficient of 0

    Returns a list of the pairwise canonical correlation coefficients.
    '''
    def to_array ( series ) :
        # One-hot encode categoricals, then reshape into a 2-D array.
        if column_types [ series . name ] == 'CATEGORICAL' :
            series = pd . get_dummies ( series )
        return series . values . reshape ( series . shape [ 0 ] , - 1 )

    if dataframe . shape [ 1 ] < 2 :
        return [ ]

    results = [ ]
    degenerate = set ( )
    for name_a , name_b in itertools . combinations ( dataframe . columns , 2 ) :
        if name_a in degenerate or name_b in degenerate :
            results . append ( 0 )
            continue
        pair = dataframe [ [ name_a , name_b ] ] . dropna ( axis = 0 , how = "any" )
        col_a = pair [ name_a ]
        col_b = pair [ name_b ]
        # A column with a single distinct value carries no correlation signal.
        if np . unique ( col_a ) . shape [ 0 ] <= 1 :
            degenerate . add ( name_a )
            results . append ( 0 )
            continue
        if np . unique ( col_b ) . shape [ 0 ] <= 1 :
            degenerate . add ( name_b )
            results . append ( 0 )
            continue
        arr_a = to_array ( col_a )
        arr_b = to_array ( col_b )
        proj_a , proj_b = CCA ( n_components = 1 ) . fit_transform ( arr_a , arr_b )
        # Degenerate projections would make corrcoef undefined.
        if np . unique ( proj_a ) . shape [ 0 ] <= 1 or np . unique ( proj_b ) . shape [ 0 ] <= 1 :
            results . append ( 0 )
        else :
            results . append ( np . corrcoef ( proj_a . T , proj_b . T ) [ 0 , 1 ] )
    return results
|
def put ( self , destination ) :
    """Copy the referenced directory to this path.

    Semantics mirror unix ``cp``: if ``destination`` already exists, the
    copied directory lands at ``[destination]/[basename(localpath)]``;
    otherwise the copy takes the ``destination`` path itself (whose
    parent directory must exist).

    Args:
        destination (str): path to put this directory
    """
    target = get_target_path ( destination , self . localpath )
    # copytree creates the target and copies the tree recursively.
    shutil . copytree ( self . localpath , target )
|
def serialize ( self , message ) :
    """Serialize the given message in-place, converting inputs to outputs.

    Mutating in-place is a deliberate performance choice: a message dict
    holds more keys (timestamp, task_level, task_uuid) than there are
    L{Field} objects, so iterating only ``self.fields`` minimizes the
    number of function calls on this hot path.

    @param message: A C{dict}; modified in place.
    """
    fields = self . fields
    for key in fields :
        message [ key ] = fields [ key ] . serialize ( message [ key ] )
|
def unregister_signals_oaiset ( self ) :
    """Disconnect the OAISet SQLAlchemy event receivers, if registered."""
    from . models import OAISet
    from . receivers import after_insert_oai_set , after_update_oai_set , after_delete_oai_set

    receivers = (
        ( 'after_insert' , after_insert_oai_set ) ,
        ( 'after_update' , after_update_oai_set ) ,
        ( 'after_delete' , after_delete_oai_set ) ,
    )
    # The three receivers are registered together, so checking one suffices.
    if contains ( OAISet , 'after_insert' , after_insert_oai_set ) :
        for event_name , receiver in receivers :
            remove ( OAISet , event_name , receiver )
|
def decode_tx_packet ( packet : str ) -> dict :
    """Break packet down into primitives, and do basic interpretation.

    >>> decode_tx_packet('10;Kaku;ID=41;SWITCH=1;CMD=ON;') == {
    ...     'node': 'gateway',
    ...     'protocol': 'kaku',
    ...     'id': '000041',
    ...     'switch': '1',
    ...     'command': 'on',
    ... }
    True
    """

    # Packet layout: <node>;<protocol>;<attr>;<attr>;...
    node_id , protocol , attrs = packet . split ( DELIM , 2 )
    data = cast ( Dict [ str , Any ] , { 'node' : PacketHeader ( node_id ) . name , } )
    data [ 'protocol' ] = protocol . lower ( )
    # Remaining attributes are positional: 0 -> id, 1 -> switch, 2 -> command.
    for i , attr in enumerate ( filter ( None , attrs . strip ( DELIM ) . split ( DELIM ) ) ) :
        if i == 0 :
            data [ 'id' ] = attr
        if i == 1 :
            data [ 'switch' ] = attr
        if i == 2 :
            data [ 'command' ] = attr
    # correct KaKu device address: zero-pad IDs shorter than 6 characters
    if data . get ( 'protocol' , '' ) == 'kaku' and len ( data [ 'id' ] ) != 6 :
        data [ 'id' ] = '0000' + data [ 'id' ]
    return data
|
def modify_number_pattern ( number_pattern , ** kwargs ) :
    """Return a copy of ``number_pattern`` with the given fields overridden.

    Any NumberPattern attribute not supplied as a keyword argument is
    carried over from ``number_pattern`` unchanged.
    """
    attrs = ( 'pattern' , 'prefix' , 'suffix' , 'grouping' , 'int_prec' , 'frac_prec' , 'exp_prec' , 'exp_plus' )
    for attr in attrs :
        # Keep caller-supplied overrides; fill the rest from the original.
        kwargs . setdefault ( attr , getattr ( number_pattern , attr ) )
    return NumberPattern ( ** kwargs )
|
def evaluate_strings ( self , groups ) :
    """Evaluate a matched string literal and queue its decoded content.

    Appends ``[value, line_num, encoding]`` entries to
    ``self.quoted_strings`` for later spell-checking.  Raw strings,
    Unicode-escape strings, and plain strings each take a different
    decoding path depending on ``self.decode_escapes``.
    """

    if self . strings :
        encoding = self . current_encoding
        if self . generic_mode : # Generic assumes no escape rules: strip quotes only.
            self . quoted_strings . append ( [ groups [ 'strings' ] [ 1 : - 1 ] , self . line_num , encoding ] )
        else :
            value = groups [ 'strings' ]
            stype = set ( )
            # Prefix before the opening quote determines the string type
            # (e.g. raw/unicode/byte); '8' is dropped to normalize u8-style prefixes.
            if value . endswith ( '"' ) :
                stype = self . get_string_type ( value [ : value . index ( '"' ) ] . lower ( ) . replace ( '8' , '' ) )
            # Skip string types we are not configured to check, and
            # single-quoted literals.
            if not self . match_string ( stype ) or value . endswith ( "'" ) :
                return
            if 'r' in stype : # Handle raw strings. We can handle even if decoding is disabled.
                # Strip prefix + delimiter from both ends; '\x00' is the
                # internal newline placeholder.
                olen = len ( groups . get ( 'raw' ) ) + len ( groups . get ( 'delim' ) ) + 2
                clen = len ( groups . get ( 'delim' ) ) + 2
                value = self . norm_nl ( value [ olen : - clen ] . replace ( '\x00' , '\n' ) )
            elif ( self . decode_escapes and not value . startswith ( ( '\'' , '"' ) ) and 'l' not in stype ) : # Decode Unicode string. May have added unsupported chars, so use `UTF-8`.
                value , encoding = self . evaluate_unicode ( value )
            elif self . decode_escapes : # Decode normal strings.
                value , encoding = self . evaluate_normal ( value )
            else : # Don't decode and just return string content.
                value = self . norm_nl ( value [ value . index ( '"' ) + 1 : - 1 ] ) . replace ( '\x00' , '\n' )
            if value :
                self . quoted_strings . append ( [ value , self . line_num , encoding ] )
|
def tokenize_sentences ( self , untokenized_string : str ) :
    """Tokenize sentences by reading a trained tokenizer and invoking
    ``PunktSentenceTokenizer()``.

    Latin uses a bundled punkt pickle; Greek uses a regex split on
    sentence-ending characters (workaround for the regex tokenizer);
    other languages load the tokenizer from ``self.tokenizer_path``.

    :type untokenized_string: str
    :param untokenized_string: A string containing one or more sentences.
    :rtype: list of strings
    """

    # load tokenizer
    assert isinstance ( untokenized_string , str ) , 'Incoming argument must be a string.'
    if self . language == 'latin' :
        self . models_path = self . _get_models_path ( self . language )
        try :
            self . model = open_pickle ( os . path . expanduser ( os . path . join ( self . models_path , 'latin_punkt.pickle' ) ) )
        except FileNotFoundError as err :
            # Re-raise the same exception type with a hint about fetching models.
            raise type ( err ) ( TokenizeSentence . missing_models_message + self . models_path )
        tokenizer = self . model
        tokenizer . _lang_vars = self . lang_vars
    elif self . language == 'greek' : # Workaround for regex tokenizer
        # Split after any Greek sentence-ending character followed by whitespace.
        self . sent_end_chars = GreekLanguageVars . sent_end_chars
        self . sent_end_chars_regex = '|' . join ( self . sent_end_chars )
        self . pattern = rf'(?<=[{self.sent_end_chars_regex}])\s'
    else :
        tokenizer = open_pickle ( self . tokenizer_path )
        tokenizer = self . _setup_tokenizer ( tokenizer )
    # mk list of tokenized sentences
    if self . language == 'latin' :
        return tokenizer . tokenize ( untokenized_string )
    elif self . language == 'greek' :
        return re . split ( self . pattern , untokenized_string )
    else :
        tokenized_sentences = [ sentence for sentence in tokenizer . sentences_from_text ( untokenized_string , realign_boundaries = True ) ]
        return tokenized_sentences
|
def form ( ** kwargs : Question ) :
    """Create a form with multiple questions.

    Each keyword argument name becomes the key for the corresponding
    answer in the returned dict.
    """
    fields = [ FormField ( name , question ) for name , question in kwargs . items ( ) ]
    return Form ( * fields )
|
def _condition_to_tag_check ( self , sample , base_sc_name , mapping , scraper_config , tags = None ) :
    """Send a service check derived from a kube-state-metrics condition metric.

    kube-state-metrics exposes conditions like::

        kube_node_status_condition{condition="Ready",node="...",status="true"} 1
        kube_node_status_condition{condition="OutOfDisk",node="...",status="false"} 1

    This evaluates such samples and emits a service check based on a
    provided condition -> check mapping dict.
    """
    # Ignore samples whose gauge value is not set (e.g. not 1).
    if not bool ( sample [ self . SAMPLE_VALUE ] ) :
        return

    label_value , condition_map = self . _get_metric_condition_map ( base_sc_name , sample [ self . SAMPLE_LABELS ] )
    service_check_name = condition_map [ 'service_check_name' ]
    mapping = condition_map [ 'mapping' ]
    node = self . _label_to_tag ( 'node' , sample [ self . SAMPLE_LABELS ] , scraper_config )
    condition = self . _label_to_tag ( 'condition' , sample [ self . SAMPLE_LABELS ] , scraper_config )
    message = "{} is currently reporting {} = {}" . format ( node , condition , label_value )

    if service_check_name is None :
        self . log . debug ( "Unable to handle {} - unknown condition {}" . format ( service_check_name , label_value ) )
    else :
        self . service_check ( service_check_name , mapping [ label_value ] , tags = tags , message = message )
|
def get_authorization ( self , request ) :
    """Extract the authorization JWT string from a request.

    Looks for the configured key first in the request body
    (``request.data``) and then in the headers (``request.META``).

    Parameters
    ----------
    request : HttpRequest
        The raw request the user sent.

    Returns
    -------
    auth : bytes
        The request's JWT content from body or header, always as a
        bytestring (the test client can hand headers back as unicode).
    """
    from django . utils . six import text_type
    from rest_framework import HTTP_HEADER_ENCODING

    auth = request . data . get ( self . key , b'' )
    if not auth :
        auth = request . META . get ( self . header_key , b'' )
    if isinstance ( auth , text_type ) :
        # Work around django test client oddness: normalize unicode to bytes.
        auth = auth . encode ( HTTP_HEADER_ENCODING )
    return auth
|
def get_input ( problem ) :
    """Return the specified problem answer.

    :param problem: problem id, optionally suffixed with ``:filename`` to
        request an uploaded file's name instead of its content
    :return: string, or bytes if a file is loaded
    """
    input_data = load_input ( )
    pbsplit = problem . split ( ":" )
    problem_input = input_data [ 'input' ] [ pbsplit [ 0 ] ]
    # File-type answers are stored as {"filename": ..., "value": <path>}.
    if isinstance ( problem_input , dict ) and "filename" in problem_input and "value" in problem_input :
        if len ( pbsplit ) > 1 and pbsplit [ 1 ] == 'filename' :
            return problem_input [ "filename" ]
        # BUGFIX: the original `open(...).read()` leaked the file handle;
        # `with` closes it deterministically.
        with open ( problem_input [ "value" ] , 'rb' ) as file_input :
            return file_input . read ( )
    return problem_input
|
def _call ( self , coeffs ) :
    """Return the inverse wavelet transform of ``coeffs``.

    Only the 'pywt' backend is implemented; any other ``self.impl``
    raises RuntimeError.
    """

    if self . impl == 'pywt' :
        # Rebuild the nested wavedecn coefficient structure from the
        # flat coefficient array.
        coeffs = pywt . unravel_coeffs ( coeffs , coeff_slices = self . _coeff_slices , coeff_shapes = self . _coeff_shapes , output_format = 'wavedecn' )
        recon = pywt . waverecn ( coeffs , wavelet = self . pywt_wavelet , mode = self . pywt_pad_mode , axes = self . axes )
        recon_shape = self . range . shape
        if recon . shape != recon_shape : # If the original shape was odd along any transformed axes it
            # will have been rounded up to the next even size after the
            # reconstruction. The extra sample should be discarded.
            # The underlying reason is decimation by two in reconstruction
            # must keep ceil(N/2) samples in each band for perfect
            # reconstruction. Reconstruction then upsamples by two.
            # When N is odd, (2 * np.ceil(N / 2)) != N.
            recon_slc = [ ]
            for i , ( n_recon , n_intended ) in enumerate ( zip ( recon . shape , recon_shape ) ) :
                if n_recon == n_intended + 1 : # Upsampling added one entry too much in this axis,
                    # drop last one
                    recon_slc . append ( slice ( - 1 ) )
                elif n_recon == n_intended :
                    # Axis already has the intended size; keep everything.
                    recon_slc . append ( slice ( None ) )
                else :
                    # Any other mismatch indicates inconsistent shapes.
                    raise ValueError ( 'in axis {}: expected size {} or {} in ' '`recon_shape`, got {}' '' . format ( i , n_recon - 1 , n_recon , n_intended ) )
            recon = recon [ tuple ( recon_slc ) ]
        return recon
    else :
        raise RuntimeError ( "bad `impl` '{}'" . format ( self . impl ) )
|
def _ResolveRelativeImport ( name , package ) :
"""Resolves a relative import into an absolute path .
This is mostly an adapted version of the logic found in the backported
version of import _ module in Python 2.7.
https : / / github . com / python / cpython / blob / 2.7 / Lib / importlib / _ _ init _ _ . py
Args :
name : relative name imported , such as ' . a ' or ' . . b . c '
package : absolute package path , such as ' a . b . c . d . e '
Returns :
The absolute path of the name to be imported , or None if it is invalid .
Examples :
_ ResolveRelativeImport ( ' . c ' , ' a . b ' ) - > ' a . b . c '
_ ResolveRelativeImport ( ' . . c ' , ' a . b ' ) - > ' a . c '
_ ResolveRelativeImport ( ' . . . c ' , ' a . c ' ) - > None"""
|
level = sum ( 1 for c in itertools . takewhile ( lambda c : c == '.' , name ) )
if level == 1 :
return package + name
else :
parts = package . split ( '.' ) [ : - ( level - 1 ) ]
if not parts :
return None
parts . append ( name [ level : ] )
return '.' . join ( parts )
|
def setup_and_run_analysis ( self ) :
    """Execute the analysis after the tab is displayed.

    Please check the code in dock.py accept(); it should follow
    approximately the same code: prepare the impact function from the
    wizard input, run it, add result layers to the canvas, and generate
    the (non-PDF) report.  Returns an (ANALYSIS_* status, message) pair.
    """

    self . show_busy ( )
    # Read user's settings
    self . read_settings ( )
    # Prepare impact function from wizard dialog user input
    self . impact_function = self . prepare_impact_function ( )
    # Prepare impact function
    status , message = self . impact_function . prepare ( )
    message = basestring_to_message ( message )
    # Check status: bad input vs. internal bug are reported differently.
    if status == PREPARE_FAILED_BAD_INPUT :
        self . hide_busy ( )
        LOGGER . warning ( tr ( 'The impact function will not be able to run because of the ' 'inputs.' ) )
        LOGGER . warning ( message . to_text ( ) )
        send_error_message ( self , message )
        return status , message
    if status == PREPARE_FAILED_BAD_CODE :
        self . hide_busy ( )
        LOGGER . warning ( tr ( 'The impact function was not able to be prepared because of a ' 'bug.' ) )
        LOGGER . exception ( message . to_text ( ) )
        send_error_message ( self , message )
        return status , message
    # Start the analysis
    status , message = self . impact_function . run ( )
    message = basestring_to_message ( message )
    # Check status
    if status == ANALYSIS_FAILED_BAD_INPUT :
        self . hide_busy ( )
        LOGGER . warning ( tr ( 'The impact function could not run because of the inputs.' ) )
        LOGGER . warning ( message . to_text ( ) )
        send_error_message ( self , message )
        return status , message
    elif status == ANALYSIS_FAILED_BAD_CODE :
        self . hide_busy ( )
        LOGGER . warning ( tr ( 'The impact function could not run because of a bug.' ) )
        LOGGER . exception ( message . to_text ( ) )
        send_error_message ( self , message )
        return status , message
    LOGGER . info ( tr ( 'The impact function could run without errors.' ) )
    # Add result layer to QGIS
    add_impact_layers_to_canvas ( self . impact_function , iface = self . parent . iface )
    # Post-run UI adjustments: zoom to impact and optionally hide exposure.
    if self . zoom_to_impact_flag :
        self . iface . zoomToActiveLayer ( )
    qgis_exposure = ( QgsProject . instance ( ) . mapLayer ( self . parent . exposure_layer . id ( ) ) )
    if self . hide_exposure_flag and qgis_exposure is not None :
        # Uncheck the exposure layer in the layer tree to hide it.
        treeroot = QgsProject . instance ( ) . layerTreeRoot ( )
        treelayer = treeroot . findLayer ( qgis_exposure . id ( ) )
        if treelayer :
            treelayer . setItemVisibilityChecked ( False )
    # we only want to generate non pdf/qpt report
    html_components = [ standard_impact_report_metadata_html ]
    error_code , message = self . impact_function . generate_report ( html_components )
    message = basestring_to_message ( message )
    if error_code == ImpactReport . REPORT_GENERATION_FAILED :
        self . hide_busy ( )
        LOGGER . info ( tr ( 'The impact report could not be generated.' ) )
        send_error_message ( self , message )
        LOGGER . exception ( message . to_text ( ) )
        return ANALYSIS_FAILED_BAD_CODE , message
    # Remember the extent used, for subsequent analyses.
    self . extent . set_last_analysis_extent ( self . impact_function . analysis_extent , qgis_exposure . crs ( ) )
    # Hide busy
    self . hide_busy ( )
    # Setup gui if analysis is done
    self . setup_gui_analysis_done ( )
    return ANALYSIS_SUCCESS , None
|
def cumstd(expr, sort=None, ascending=True, unique=False, preceding=None, following=None):
    """Calculate cumulative standard deviation of a sequence expression.

    :param expr: expression for calculation
    :param sort: name of the sort column
    :param ascending: whether to sort in ascending order
    :param unique: whether to eliminate duplicate entries
    :param preceding: the start point of a window
    :param following: the end point of a window
    :return: calculated column
    """
    # Delegate to the generic cumulative-operation builder; the output data
    # type for statistics is derived from the input expression.
    return _cumulative_op(
        expr, CumStd,
        sort=sort, ascending=ascending, unique=unique,
        preceding=preceding, following=following,
        data_type=_stats_type(expr),
    )
|
def list_pkgs(versions_as_list=False, **kwargs):
    '''List the packages currently installed in a dict::

        {'<package_name>': '<version>'}

    CLI Example:

    .. code-block:: bash

        salt '*' pkg.list_pkgs
    '''
    versions_as_list = salt.utils.data.is_true(versions_as_list)
    # 'removed' and 'purge_desired' are not yet implemented or not applicable.
    if any(salt.utils.data.is_true(kwargs.get(flag))
           for flag in ('removed', 'purge_desired')):
        return {}
    # Serve from the per-call context cache when populated.
    if 'pkg.list_pkgs' in __context__:
        cached = __context__['pkg.list_pkgs']
        if versions_as_list:
            return cached
        stringified = copy.deepcopy(cached)
        __salt__['pkg_resource.stringify'](stringified)
        return stringified
    pkglist = {}
    for cpv in _vartree().dbapi.cpv_all():
        __salt__['pkg_resource.add_pkg'](pkglist, _cpv_to_cp(cpv), _cpv_to_version(cpv))
    __salt__['pkg_resource.sort_pkglist'](pkglist)
    # Cache a deep copy so later stringification doesn't mutate the cache.
    __context__['pkg.list_pkgs'] = copy.deepcopy(pkglist)
    if not versions_as_list:
        __salt__['pkg_resource.stringify'](pkglist)
    return pkglist
|
def stack_outputs(self, name):
    """Given a name, describes CloudFront stacks and returns dict of the
    stack Outputs, else returns an empty dict."""
    try:
        stack = self.cf_client.describe_stacks(StackName=name)['Stacks'][0]
        outputs = stack['Outputs']
        return {entry['OutputKey']: entry['OutputValue'] for entry in outputs}
    except botocore.client.ClientError:
        # Stack does not exist (or cannot be described): no outputs.
        return {}
|
def initialize(self, *args, **kwargs):
    """Call self._initialize with `self` made available to Zipline API
    functions.

    Runs the user-supplied initialize hook inside a ZiplineAPI context so
    that module-level API functions resolve against this algorithm instance.
    """
    with ZiplineAPI(self):
        self._initialize(self, *args, **kwargs)
|
def settle(self):
    """End-of-day settlement: roll today's traded volumes into the
    historical totals and reset today's (and today's frozen) counters."""
    # Short side roll-over.
    self.volume_short_his += self.volume_short_today
    self.volume_short_today = 0
    self.volume_short_frozen_today = 0
    # Long side roll-over.
    self.volume_long_his += self.volume_long_today
    self.volume_long_today = 0
    self.volume_long_frozen_today = 0
|
def from_interval(self, startnote, shorthand, up=True):
    """Shortcut to from_interval_shorthand.

    Thin convenience delegation kept for API compatibility.
    """
    return self.from_interval_shorthand(startnote, shorthand, up)
|
def show(self):
    """Print each entry as a dict-like line, followed by a blank line.

    This could use some love; it's currently here as reference.
    """
    for entry in self._entries:
        # Modernized from Python 2 print *statements* to the print()
        # function, matching the rest of the module (which already uses
        # `print(msg, file=sys.stderr)` and therefore requires
        # print-function semantics).
        print("{'%s': %s, 'records': %s}" % (entry._record_type, entry.host, entry.records))
        print()
|
def delete(self, cascade_delete=None, access_key=None, secret_key=None, verbose=None, debug=None, retries=None, headers=None):
    """Delete a file from the Archive. Note: Some files -- such as
    <itemname>_meta.xml -- cannot be deleted.

    :type cascade_delete: bool
    :param cascade_delete: (optional) Also deletes files derived from the file, and
                           files the file was derived from.
    :type access_key: str
    :param access_key: (optional) IA-S3 access_key to use when making the given
                       request.
    :type secret_key: str
    :param secret_key: (optional) IA-S3 secret_key to use when making the given
                       request.
    :type verbose: bool
    :param verbose: (optional) Print actions to stderr.
    :type debug: bool
    :param debug: (optional) Set to True to return the unsent request instead
                  of sending the delete request.
    """
    # Normalize args; cascade_delete becomes the '0'/'1' header value.
    cascade_delete = '1' if cascade_delete else '0'
    access_key = self.item.session.access_key if not access_key else access_key
    secret_key = self.item.session.secret_key if not secret_key else secret_key
    debug = False if not debug else debug
    verbose = False if not verbose else verbose
    max_retries = 2 if retries is None else retries
    headers = dict() if headers is None else headers
    if 'x-archive-cascade-delete' not in headers:
        headers['x-archive-cascade-delete'] = cascade_delete
    url = '{0}//s3.us.archive.org/{1}/{2}'.format(self.item.session.protocol, self.identifier, self.name)
    self.item.session.mount_http_adapter(max_retries=max_retries, status_forcelist=[503], host='s3.us.archive.org')
    request = iarequest.S3Request(method='DELETE', url=url, headers=headers, access_key=access_key, secret_key=secret_key)
    if debug:
        return request
    else:
        if verbose:
            msg = ' deleting: {0}'.format(self.name)
            # BUG FIX: at this point cascade_delete is the string '0' or '1',
            # both of which are truthy -- the extra message was always
            # printed. Compare against '1' explicitly.
            if cascade_delete == '1':
                msg += ' and all derivative files.'
            print(msg, file=sys.stderr)
        prepared_request = self.item.session.prepare_request(request)
        try:
            resp = self.item.session.send(prepared_request)
            resp.raise_for_status()
        except (RetryError, HTTPError, ConnectTimeout, ConnectionError, socket.error, ReadTimeout) as exc:
            error_msg = 'Error deleting {0}, {1}'.format(url, exc)
            log.error(error_msg)
            raise
        else:
            return resp
        finally:
            # The retry adapter is mounted to the session object.
            # Make sure to remove it after delete, so it isn't
            # mounted if and when the session object is used for an
            # upload. This is important because we use custom retry
            # handling for IA-S3 uploads.
            url_prefix = '{0}//s3.us.archive.org'.format(self.item.session.protocol)
            del self.item.session.adapters[url_prefix]
|
def lhoodplot(self, trsig=None, fig=None, piechart=True, figsize=None, logscale=True, constraints='all', suptitle=None, Ltot=None, maxdur=None, maxslope=None, inverse=False, colordict=None, cachefile=None, nbins=20, dur_range=None, slope_range=None, depth_range=None, recalc=False, **kwargs):
    """Makes plot of likelihood density function, optionally with transit signal.

    If ``trsig`` not passed, then just a density plot of the likelihood
    will be made; if it is passed, then it will be plotted
    over the density plot.

    :param trsig: (optional) :class:`vespa.TransitSignal` object.
    :param fig: (optional) Argument for :func:`plotutils.setfig`.
    :param piechart: (optional) Whether to include a plot of the piechart
        that describes the effect of the constraints on the population.
    :param figsize: (optional) Passed to :func:`plotutils.setfig`.
    :param logscale: (optional) If ``True``, then shading will be based on
        the log-histogram (thus showing more detail at low density). Passed
        to :func:`vespa.stars.StarPopulation.prophist2d`.
    :param constraints: (``'all', 'none'`` or ``list``; optional) Which
        constraints to apply in making plot. Picking specific constraints
        allows you to visualize in more detail what the effect of a
        constraint is.
    :param suptitle: (optional) Title for the figure.
    :param Ltot: (optional) Total of ``prior * likelihood`` for all models.
        If this is passed, then "Probability of scenario" gets a text box
        in the middle.
    :param inverse: (optional) Intended to allow showing only the instances
        that are ruled out, rather than those that remain. Not sure if this
        works anymore.
    :param colordict: (optional) Dictionary to define colors of constraints
        to be used in pie chart. Intended to unify constraint colors among
        different models.
    :param cachefile: (optional) Likelihood calculation cache file.
    :param nbins: (optional) Number of bins with which to make the 2D
        histogram plot; passed to :func:`vespa.stars.StarPopulation.prophist2d`.
    :param dur_range, slope_range, depth_range: (optional) Define ranges of plots.
    :param **kwargs: Additional keyword arguments passed to
        :func:`vespa.stars.StarPopulation.prophist2d`.
    """
    setfig(fig, figsize=figsize)
    if trsig is not None:
        dep, ddep = trsig.logdepthfit
        dur, ddur = trsig.durfit
        slope, dslope = trsig.slopefit
        # Reshape uncertainties to (2, 1) for asymmetric errorbars.
        ddep = ddep.reshape((2, 1))
        ddur = ddur.reshape((2, 1))
        dslope = dslope.reshape((2, 1))
        if dur_range is None:
            dur_range = (0, dur * 2)
        if slope_range is None:
            slope_range = (2, slope * 2)
    if constraints == 'all':
        mask = self.distok
    elif constraints == 'none':
        mask = np.ones(len(self.stars)).astype(bool)
    else:
        mask = np.ones(len(self.stars)).astype(bool)
        for c in constraints:
            if c not in self.distribution_skip:
                mask &= self.constraints[c].ok
    if inverse:
        mask = ~mask
    if dur_range is None:
        dur_range = (self.stars[mask]['duration'].min(), self.stars[mask]['duration'].max())
    if slope_range is None:
        slope_range = (2, self.stars[mask]['slope'].max())
    if depth_range is None:
        depth_range = (-5, -0.1)
    # This may mess with intended "inverse" behavior, probably?
    # NOTE: each of the three range clauses below appeared twice in the
    # original; '&=' with an identical operand is idempotent, so the exact
    # duplicate lines were removed.
    mask &= ((self.stars['duration'] > dur_range[0]) & (self.stars['duration'] < dur_range[1]))
    mask &= ((self.stars['slope'] > slope_range[0]) & (self.stars['slope'] < slope_range[1]))
    mask &= ((np.log10(self.depth) > depth_range[0]) & (np.log10(self.depth) < depth_range[1]))
    if piechart:
        a_pie = plt.axes([0.07, 0.5, 0.4, 0.5])
        self.constraint_piechart(fig=0, colordict=colordict)
    ax1 = plt.subplot(222)
    if not self.is_ruled_out:
        self.prophist2d('duration', 'depth', logy=True, fig=0, mask=mask, interpolation='bicubic', logscale=logscale, nbins=nbins, **kwargs)
    if trsig is not None:
        # White underlay then red marker so the point is visible on any shading.
        plt.errorbar(dur, dep, xerr=ddur, yerr=ddep, color='w', marker='x', ms=12, mew=3, lw=3, capsize=3, mec='w')
        plt.errorbar(dur, dep, xerr=ddur, yerr=ddep, color='r', marker='x', ms=10, mew=1.5)
    plt.ylabel(r'log($\delta$)')
    plt.xlabel('')
    plt.xlim(dur_range)
    plt.ylim(depth_range)
    yt = ax1.get_yticks()
    plt.yticks(yt[1:])
    xt = ax1.get_xticks()
    plt.xticks(xt[2:-1:2])
    ax3 = plt.subplot(223)
    if not self.is_ruled_out:
        self.prophist2d('depth', 'slope', logx=True, fig=0, mask=mask, interpolation='bicubic', logscale=logscale, nbins=nbins, **kwargs)
    if trsig is not None:
        plt.errorbar(dep, slope, xerr=ddep, yerr=dslope, color='w', marker='x', ms=12, mew=3, lw=3, capsize=3, mec='w')
        plt.errorbar(dep, slope, xerr=ddep, yerr=dslope, color='r', marker='x', ms=10, mew=1.5)
    plt.ylabel(r'$T/\tau$')
    plt.xlabel(r'log($\delta$)')
    plt.ylim(slope_range)
    plt.xlim(depth_range)
    yt = ax3.get_yticks()
    plt.yticks(yt[1:])
    ax4 = plt.subplot(224)
    if not self.is_ruled_out:
        self.prophist2d('duration', 'slope', fig=0, mask=mask, interpolation='bicubic', logscale=logscale, nbins=nbins, **kwargs)
    if trsig is not None:
        plt.errorbar(dur, slope, xerr=ddur, yerr=dslope, color='w', marker='x', ms=12, mew=3, lw=3, capsize=3, mec='w')
        plt.errorbar(dur, slope, xerr=ddur, yerr=dslope, color='r', marker='x', ms=10, mew=1.5)
    plt.ylabel('')
    plt.xlabel(r'$T$ [days]')
    plt.ylim(slope_range)
    plt.xlim(dur_range)
    plt.xticks(xt[2:-1:2])
    plt.yticks(ax3.get_yticks())
    # Hide shared-axis tick labels and pack the subplots together.
    ticklabels = ax1.get_xticklabels() + ax4.get_yticklabels()
    plt.setp(ticklabels, visible=False)
    plt.subplots_adjust(hspace=0.001, wspace=0.001)
    if suptitle is None:
        suptitle = self.model
    plt.suptitle(suptitle, fontsize=20)
    if Ltot is not None:
        lhood = self.lhood(trsig, recalc=recalc)
        plt.annotate('%s:\nProbability\nof scenario: %.3f' % (trsig.name, self.prior * lhood / Ltot), xy=(0.5, 0.5), ha='center', va='center', bbox=dict(boxstyle='round', fc='w'), xycoords='figure fraction', fontsize=15)
|
def __addNode(self, name, cls):
    '''Add a node to the topology.

    :param name: unique node name; raises Exception on a duplicate.
    :param cls: node class; instantiated with no arguments and stored
        on the graph node under 'nodeobj'.
    '''
    if name in self.nodes:
        raise Exception("A node by the name {} already exists. Can't add a duplicate.".format(name))
    self.__nxgraph.add_node(name)
    # Record label, the node instance itself, and its type name as
    # attributes on the underlying networkx graph node.
    self.__nxgraph.node[name]['label'] = name
    self.__nxgraph.node[name]['nodeobj'] = cls()
    self.__nxgraph.node[name]['type'] = cls.__name__
|
def add_group(group_name, system_group=False, gid=None):
    """Add a group to the system.

    Will log but otherwise succeed if the group already exists.

    :param str group_name: group to create
    :param bool system_group: Create system group
    :param int gid: GID for user being created
    :returns: The group database entry struct, as returned by `grp.getgrnam`
    """
    try:
        existing = grp.getgrnam(group_name)
        log('group {0} already exists!'.format(group_name))
        if gid:
            # Prefer the entry matching the requested gid when one exists.
            existing = grp.getgrgid(gid)
            log('group with gid {0} already exists!'.format(gid))
        return existing
    except KeyError:
        # Neither name nor gid found: create it and return the fresh entry.
        log('creating group {0}'.format(group_name))
        add_new_group(group_name, system_group, gid)
        return grp.getgrnam(group_name)
|
def postprocess(self):
    """Postprocessing includes renaming and gzipping where necessary.
    Also copies the magmom to the incar if necessary.
    """
    # Rename (final run) or copy (intermediate run) each VASP output file
    # to a suffixed name, when a suffix is configured.
    for f in VASP_OUTPUT_FILES + [self.output_file]:
        if os.path.exists(f):
            if self.final and self.suffix != "":
                shutil.move(f, "{}{}".format(f, self.suffix))
            elif self.suffix != "":
                shutil.copy(f, "{}{}".format(f, self.suffix))
    if self.copy_magmom and not self.final:
        try:
            outcar = Outcar("OUTCAR")
            magmom = [m['tot'] for m in outcar.magnetization]
            incar = Incar.from_file("INCAR")
            incar['MAGMOM'] = magmom
            incar.write_file("INCAR")
        # BUG FIX: this was a bare `except:`, which also swallowed
        # SystemExit/KeyboardInterrupt; catch Exception and log the
        # traceback so the failure cause is visible.
        except Exception:
            logger.exception('MAGMOM copy from OUTCAR to INCAR failed')
    # Remove continuation so if a subsequent job is run in
    # the same directory, will not restart this job.
    if os.path.exists("continue.json"):
        os.remove("continue.json")
|
def skew(self):
    """Return the skewness of the distribution."""
    n, a, b = self.n, self.a, self.b
    # Skewness = [(a + b + 2n)(b - a) / (a + b + 2)] * sqrt((1+a+b) / (n*a*b*(n+a+b)))
    ratio = (a + b + 2 * n) * (b - a) / (a + b + 2)
    scale = sqrt((1 + a + b) / (n * a * b * (n + a + b)))
    return ratio * scale
|
def typos(self):
    """Letter combinations one typo away from the word."""
    # Union of all four single-edit candidate sets.
    deletes = self._deletes()
    transposes = self._transposes()
    replaces = self._replaces()
    inserts = self._inserts()
    return deletes | transposes | replaces | inserts
|
def get_contract(self, contract_name: str) -> Dict:
    """Return ABI, BIN of the given contract.

    :param contract_name: key into the compiled-contracts mapping.
    :returns: the compiled artifact dict for that contract.
    :raises KeyError: if the contract name is unknown.
    """
    # NOTE: assert is stripped under `python -O`; this is a precondition
    # check that the contracts were compiled, not input validation.
    assert self.contracts, 'ContractManager should have contracts compiled'
    return self.contracts[contract_name]
|
def parse_statement(self, stream):
    """Statement ::= AggrGroup
                   | AggrObject
                   | AssignmentStmt
    """
    # Dispatch to the first production whose predicate matches the lookahead.
    if self.has_group(stream):
        return self.parse_group(stream)
    elif self.has_object(stream):
        return self.parse_object(stream)
    elif self.has_assignment(stream):
        return self.parse_assignment(stream)
    elif self.has_assignment_symbol(stream):
        # An assignment symbol without a valid assignment: report the error
        # against the previous line.
        return self.broken_assignment(stream.lineno - 1)
    self.raise_unexpected(stream)
|
def all_combinations(items):
    """Generate all combinations (the power set) of a given list of items.

    Yields one ``set`` per subset, including the empty set and the full set.
    """
    # One 0/1 inclusion mask per subset; compress picks the included items.
    masks = product((0, 1), repeat=len(items))
    return (set(compress(items, mask)) for mask in masks)
|
def list_all_available(self, id_vlan):
    """List all available environment vips.

    :return: Following dictionary:

        {'environment_vip': [{'id': <id>,
                              'finalidade_txt': <finalidade_txt>,
                              'cliente_txt': <cliente_txt>,
                              'ambiente_p44_txt': <ambiente_p44_txt>}
                             {...other environments vip...}]}

    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    key = 'environment_vip'
    # GET the search endpoint for the given vlan and unwrap the XML list.
    code, xml = self.submit(None, 'GET', 'environmentvip/search/' + str(id_vlan))
    return get_list_map(self.response(code, xml, [key]), key)
|
def headalongline(self, x, y, layers=None):
    """Head along line or curve.

    Parameters
    ----------
    x : array
        x values of line
    y : array
        y values of line
    layers : integer, list or array, optional
        layers for which grid is returned

    Returns
    -------
    h : array size `nlayers, nx`
    """
    xline = np.atleast_1d(x)
    yline = np.atleast_1d(y)
    # Number of layers: from the aquifer at the first point, or the request.
    if layers is None:
        nlayers = self.aq.find_aquifer_data(xline[0], yline[0]).naq
    else:
        nlayers = len(np.atleast_1d(layers))
    nx = len(xline)
    # A single y value is broadcast along the whole line.
    if len(yline) == 1:
        yline = yline * np.ones(nx)
    h = np.zeros((nlayers, nx))
    for i in range(nx):
        h[:, i] = self.head(xline[i], yline[i], layers)
    return h
|
def create_param_info(task_params, parameter_map):
    """Builds the code block for the GPTool GetParameterInfo method based on
    the input task_params.

    :param task_params: A list of task parameters to map to GPTool parameters.
    :param parameter_map: Mapping from task data type names to GPTool
        parameter templates.
    :return: A string representing the code block to the GPTool
        GetParameterInfo method.
    :raises UnknownDataTypeError: for multi-dimensional arrays or for data
        types with no template mapping.
    """
    gp_params = []
    gp_param_list = []
    gp_param_idx_list = []
    gp_param_idx = 0
    for task_param in task_params:
        # Setup the gp_param dictionary used to substitute against the
        # parameter info template.
        gp_param = {}
        # Convert DataType
        data_type = task_param['type'].upper()
        if 'dimensions' in task_param:
            if len(task_param['dimensions'].split(',')) > 1:
                raise UnknownDataTypeError('Only one-dimensional arrays are supported.')
            data_type += 'ARRAY'
        if data_type in parameter_map:
            gp_param['dataType'] = parameter_map[data_type].data_type
        else:
            # No mapping exists for this data type!
            raise UnknownDataTypeError('Unable to map task datatype: ' + data_type + '. A template must be created.')
        gp_param['name'] = task_param['name']
        gp_param['displayName'] = task_param['display_name']
        gp_param['direction'] = _DIRECTION_MAP[task_param['direction']]
        gp_param['paramType'] = 'Required' if task_param['required'] else 'Optional'
        # ENVI/IDL output type translates to a derived output type in Arc.
        # BUG FIX: this comparison used `is`, an identity test that only
        # works by CPython string-interning coincidence -- use `==`.
        if gp_param['direction'] == 'Output':
            gp_param['paramType'] = 'Derived'
        gp_param['multiValue'] = True if 'dimensions' in task_param else False
        # Substitute values into the template
        gp_params.append(parameter_map[data_type].get_parameter(task_param).substitute(gp_param))
        # Convert the default value
        if 'default_value' in task_param:
            gp_param['defaultValue'] = task_param['default_value']
            gp_params.append(parameter_map[data_type].default_value().substitute(gp_param))
        # Convert any choicelist
        if 'choice_list' in task_param:
            gp_param['choiceList'] = task_param['choice_list']
            gp_params.append(_CHOICELIST_TEMPLATE.substitute(gp_param))
        # Construct the parameter list and indices for future reference
        for param_name in parameter_map[data_type].parameter_names(task_param):
            gp_param_list.append(param_name.substitute(gp_param))
            gp_param_idx_list.append(_PARAM_INDEX_TEMPLATE.substitute({'name': param_name.substitute(gp_param), 'idx': gp_param_idx}))
            gp_param_idx += 1
    # Construct the final parameter string
    gp_params.append(_PARAM_RETURN_TEMPLATE.substitute({'paramList': convert_list(gp_param_list)}))
    return ''.join((''.join(gp_params), ''.join(gp_param_idx_list)))
|
def to_userdata(self):
    """Return a Userdata dictionary built from this record's fields."""
    # Address bytes are stored most-significant-first in d8..d10.
    addr = self.address.bytes
    return Userdata({
        'd3': self.memhi,
        'd4': self.memlo,
        'd6': self.control_flags,
        'd7': self.group,
        'd8': addr[2],
        'd9': addr[1],
        'd10': addr[0],
        'd11': self.data1,
        'd12': self.data2,
        'd13': self.data3,
    })
|
def get_imported_data(csv_file, **kwargs):
    """Reads the content of the Polarion exported csv file and returns imported data."""
    # Py2/py3 compatibility: on py2 (where `unicode` exists) the csv module
    # wants binary mode; on py3 we open as text with an explicit utf-8.
    open_args = []
    open_kwargs = {}
    try:
        unicode  # pylint: disable=pointless-statement
        open_args.append("rb")
    except NameError:
        open_kwargs["encoding"] = "utf-8"
    with open(os.path.expanduser(csv_file), *open_args, **open_kwargs) as input_file:
        reader = _get_csv_reader(input_file)
        fieldnames = _get_csv_fieldnames(reader)
        if not fieldnames:
            raise Dump2PolarionException("Cannot find field names in CSV file '{}'".format(csv_file))
        results = _get_results(reader, fieldnames)
        if not results:
            raise Dump2PolarionException("No results read from CSV file '{}'".format(csv_file))
        testrun = _get_testrun_from_csv(input_file, reader)
        return xunit_exporter.ImportedData(results=results, testrun=testrun)
|
def do_load(self, filename):
    """Load disk image for analysis.

    Delegates to the underlying session; I/O failures are logged rather
    than propagated so an interactive loop keeps running.
    """
    try:
        self.__session.load(filename)
    except IOError as e:
        # Log only the OS-level error text and swallow the exception.
        self.logger.error(e.strerror)
|
def example_write_file_to_disk_if_changed():
    """Try to remove all comments from a file, and save it if changes were made."""
    my_file = FileAsObj('/tmp/example_file.txt')
    # Remove every line starting with '#' (egrep returns the matching lines).
    my_file.rm(my_file.egrep('^#'))
    # FileAsObj tracks modifications; only touch disk when something changed.
    if my_file.changed:
        my_file.save()
|
def _item_to_bucket(iterator, item):
    """Convert a JSON bucket to the native object.

    :type iterator: :class:`~google.api_core.page_iterator.Iterator`
    :param iterator: The iterator that has retrieved the item.

    :type item: dict
    :param item: An item to be converted to a bucket.

    :rtype: :class:`.Bucket`
    :returns: The next bucket in the page.
    """
    name = item.get("name")
    bucket = Bucket(iterator.client, name)
    # Populate the bucket's cached properties from the raw API resource.
    bucket._set_properties(item)
    return bucket
|
def str_fmthdr(self, goid, goobj):
    """Return hdr line seen inside a GO Term box."""
    # Shorten: e.g. GO:0007608 -> G0007608
    hdr = goid.replace("GO:", "G")
    # Append 'a' to flag alternate GO IDs when that option is enabled.
    is_alt_id = goid != goobj.id
    if 'mark_alt_id' in self.present and is_alt_id:
        hdr += 'a'
    return hdr
|
def save_ical(self, ical_location):  # type: (str) -> None
    """Save the calendar instance to a file.

    :param ical_location: path of the .ics file to (over)write.
    """
    data = self.cal.to_ical()
    # BUG FIX: the file was opened without an explicit encoding, so the
    # platform/locale default applied and non-ASCII calendar content could
    # be corrupted or rejected. io.open (already used elsewhere in this
    # module) pins it to utf-8 on both py2 and py3.
    with io.open(ical_location, 'w', encoding='utf-8') as ical_file:
        ical_file.write(data.decode('utf-8'))
|
def merge_diff(initial_config=None, initial_path=None, merge_config=None, merge_path=None, saltenv='base'):
    '''Return the merge diff, as text, after merging the merge config into the
    initial config.

    initial_config
        The initial configuration sent as text. This argument is ignored when
        ``initial_path`` is set.

    initial_path
        Absolute or remote path from where to load the initial configuration
        text. This argument allows any URI supported by
        :py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
        ``https://``, ``s3://``, ``ftp:/``, etc.

    merge_config
        The config to be merged into the initial config, sent as text. This
        argument is ignored when ``merge_path`` is set.

    merge_path
        Absolute or remote path from where to load the merge configuration
        text. This argument allows any URI supported by
        :py:func:`cp.get_url <salt.modules.cp.get_url>`), e.g., ``salt://``,
        ``https://``, ``s3://``, ``ftp:/``, etc.

    saltenv: ``base``
        Salt fileserver environment from which to retrieve the file.
        Ignored if ``initial_path`` or ``merge_path`` is not a ``salt://`` URL.

    CLI Example:

    .. code-block:: bash

        salt '*' iosconfig.merge_diff initial_path=salt://path/to/running.cfg merge_path=salt://path/to/merge.cfg
    '''
    if initial_path:
        initial_config = __salt__['cp.get_file_str'](initial_path, saltenv=saltenv)
    # Build the would-be merged config, then diff it against a normalised
    # ("clean") re-rendering of the initial config.
    candidate_config = merge_text(initial_config=initial_config, merge_config=merge_config, merge_path=merge_path, saltenv=saltenv)
    clean_running = _print_config_text(tree(config=initial_config))
    return _get_diff_text(clean_running, candidate_config)
|
def create_init(self, path):  # type: (str) -> None
    """Create a minimal __init__ file with enough boiler plate to not add to
    lint messages.

    :param path: destination file path
    :return:
    """
    # Keep the boilerplate as an explicit line-by-line literal.
    boilerplate = (
        '# coding=utf-8\n'
        '"""\n'
        'Version\n'
        '"""\n'
        '__version__ = "0.0.0"\n'
    )
    with io.open(path, "w", encoding="utf-8") as outfile:
        outfile.write(boilerplate)
|
def count_range(self, txn, from_key, to_key):
    """Count the number of records in the persistent map with keys
    within the given range.

    :param txn: The transaction in which to run.
    :type txn: :class:`zlmdb.Transaction`

    :param from_key: Count records starting and including from this key.
    :type from_key: object

    :param to_key: End counting records before this key.
    :type to_key: object

    :returns: The number of records.
    :rtype: int
    """
    # Keys are namespaced by the 2-byte big-endian slot prefix.
    prefix = struct.pack('>H', self._slot)
    lower = prefix + self._serialize_key(from_key)
    upper = prefix + self._serialize_key(to_key)
    count = 0
    cursor = txn._txn.cursor()
    more = cursor.set_range(lower)
    while more:
        # Stop at the first key at or beyond the (exclusive) upper bound.
        if cursor.key() >= upper:
            break
        count += 1
        more = cursor.next()
    return count
|
def change_background(color):
    """Setup the background color to *color*.

    Example::

        change_background('black')
        change_background('white')
        change_background('#fffff')

    You can call this function interactively by using::

        change_color.interactive()

    A new dialog will popup with a color chooser.

    .. seealso:: :py:func:`chemlab.graphics.colors.parse_color`
    """
    # Normalise the user-supplied color spec to RGB, apply it to the
    # module-level viewer, and trigger a redraw.
    viewer.widget.background_color = colors.any_to_rgb(color)
    viewer.update()
|
def encode_hdr(self, boundary):
    """Returns the header of the encoding of this parameter."""
    boundary = encode_and_quote(boundary)
    # Content-Disposition carries a filename only for file parameters.
    if self.filename:
        disposition = 'form-data; name="%s"; filename="%s"' % (self.name, self.filename)
    else:
        disposition = 'form-data; name="%s"' % self.name
    content_type = self.filetype if self.filetype else "text/plain; charset=utf-8"
    # Two trailing empty strings yield the blank line separating the header
    # block from the body, plus the final CRLF.
    lines = [
        "--%s" % boundary,
        "Content-Disposition: %s" % disposition,
        "Content-Type: %s" % content_type,
        "",
        "",
    ]
    return "\r\n".join(lines)
|
def get_all_resource_data(scenario_id, include_metadata='N', page_start=None, page_end=None, **kwargs):
    """A function which returns the data for all resources in a network.

    :param scenario_id: scenario whose resource data is fetched.
    :param include_metadata: 'Y' to also fetch and attach dataset metadata.
    :param page_start: optional start index for paging the result list.
    :param page_end: optional end index (exclusive) for paging.
    :returns: list of ResourceData namedtuples, one per resource scenario.
    """
    # NOTE: `!= None` is intentional here -- SQLAlchemy overloads it into a
    # SQL "IS NOT NULL" expression; `is not None` would not translate.
    rs_qry = db.DBSession.query(
        ResourceAttr.attr_id,
        Attr.name.label('attr_name'),
        ResourceAttr.id.label('resource_attr_id'),
        ResourceAttr.ref_key,
        ResourceAttr.network_id,
        ResourceAttr.node_id,
        ResourceAttr.link_id,
        ResourceAttr.group_id,
        ResourceAttr.project_id,
        ResourceAttr.attr_is_var,
        ResourceScenario.scenario_id,
        ResourceScenario.source,
        Dataset.id.label('dataset_id'),
        Dataset.name.label('dataset_name'),
        Dataset.value,
        Dataset.unit_id,
        Dataset.hidden,
        Dataset.type,
        null().label('metadata'),
        case([
            (ResourceAttr.node_id != None, Node.name),
            (ResourceAttr.link_id != None, Link.name),
            (ResourceAttr.group_id != None, ResourceGroup.name),
            (ResourceAttr.network_id != None, Network.name),
        ]).label('ref_name'),
    ).join(ResourceScenario, ResourceScenario.resource_attr_id == ResourceAttr.id
    ).join(Dataset, ResourceScenario.dataset_id == Dataset.id
    ).join(Attr, ResourceAttr.attr_id == Attr.id
    ).outerjoin(Node, ResourceAttr.node_id == Node.id
    ).outerjoin(Link, ResourceAttr.link_id == Link.id
    ).outerjoin(ResourceGroup, ResourceAttr.group_id == ResourceGroup.id
    ).outerjoin(Network, ResourceAttr.network_id == Network.id
    ).filter(ResourceScenario.scenario_id == scenario_id)
    all_resource_data = rs_qry.all()
    # Apply optional paging to the raw result list.
    if page_start is not None and page_end is None:
        all_resource_data = all_resource_data[page_start:]
    elif page_start is not None and page_end is not None:
        all_resource_data = all_resource_data[page_start:page_end]
    log.info("%s datasets retrieved", len(all_resource_data))
    metadata_dict = {}
    if include_metadata == 'Y':
        metadata_qry = db.DBSession.query(
            distinct(Metadata.dataset_id).label('dataset_id'),
            Metadata.key,
            Metadata.value,
        ).filter(
            ResourceScenario.resource_attr_id == ResourceAttr.id,
            ResourceScenario.scenario_id == scenario_id,
            Dataset.id == ResourceScenario.dataset_id,
            Metadata.dataset_id == Dataset.id,
        )
        log.info("Querying node metadata")
        metadata = metadata_qry.all()
        log.info("%s metadata items retrieved", len(metadata))
        # Group the metadata rows by dataset id for quick lookup below.
        for m in metadata:
            metadata_dict.setdefault(m.dataset_id, []).append(m)
    return_data = []
    for ra in all_resource_data:
        ra_dict = ra._asdict()
        if ra.hidden == 'Y':
            try:
                d = db.DBSession.query(Dataset).filter(
                    Dataset.id == ra.dataset_id
                ).options(noload('metadata')).one()
                d.check_read_permission(kwargs.get('user_id'))
            # BUG FIX: this was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Any failure (dataset missing,
            # permission denied) hides the value from the caller.
            except Exception:
                ra_dict['value'] = None
                ra_dict['metadata'] = []
        else:
            if include_metadata == 'Y':
                ra_dict['metadata'] = metadata_dict.get(ra.dataset_id, [])
        return_data.append(namedtuple('ResourceData', ra_dict.keys())(**ra_dict))
    log.info("Returning %s datasets", len(return_data))
    return return_data
|
def dump(self, path):
    """dump DictTree data to json files.

    NOTE(review): the first ``with`` writes the text/JSON form, and then the
    second ``with`` unconditionally re-opens the *same* path and overwrites
    it with a pickle of ``self.__data__`` -- so the text write is
    effectively dead code unless pickling fails after opening. Presumably
    the pickle is the intended on-disk format; confirm whether the first
    write (and its silent except) can be removed.
    """
    try:
        with open(path, "wb") as f:
            f.write(self.__str__().encode("utf-8"))
    except:
        # Failures of the text write are silently ignored; the pickle
        # below is what actually persists the data.
        pass
    with open(path, "wb") as f:
        pickle.dump(self.__data__, f)
|
def lens_model_plot(ax, lensModel, kwargs_lens, numPix=500, deltaPix=0.01, sourcePos_x=0, sourcePos_y=0, point_source=False, with_caustics=False):
    """plots a lens model (convergence) and the critical curves and caustics

    :param ax: matplotlib axis to draw into
    :param lensModel: lens model instance
    :param kwargs_lens: keyword arguments of the lens model
    :param numPix: number of pixels per axis of the rendered grid
    :param deltaPix: pixel scale of the grid
    :param sourcePos_x: source x position (used when point_source is True)
    :param sourcePos_y: source y position (used when point_source is True)
    :param point_source: if True, solve the lens equation and mark the images
    :param with_caustics: if True, overlay critical curves and caustics
    :return: the matplotlib axis
    """
    kwargs_data = sim_util.data_configure_simple(numPix, deltaPix)
    data = Data(kwargs_data)
    _frame_size = numPix * deltaPix
    _coords = data._coords
    x_grid, y_grid = data.coordinates
    lensModelExt = LensModelExtensions(lensModel)
    # ra_crit_list, dec_crit_list, ra_caustic_list, dec_caustic_list = lensModelExt.critical_curve_caustics(
    #    kwargs_lens, compute_window=_frame_size, grid_scale=deltaPix/2.)
    x_grid1d = util.image2array(x_grid)
    y_grid1d = util.image2array(y_grid)
    # Convergence (kappa) map over the full grid, shown on a log scale.
    kappa_result = lensModel.kappa(x_grid1d, y_grid1d, kwargs_lens)
    kappa_result = util.array2image(kappa_result)
    im = ax.matshow(np.log10(kappa_result), origin='lower', extent=[0, _frame_size, 0, _frame_size], cmap='Greys', vmin=-1, vmax=1)
    # , cmap=self._cmap, vmin=v_min, vmax=v_max)
    if with_caustics is True:
        # Critical curves in the image plane; ray-shoot them back to the
        # source plane to obtain the caustics.
        ra_crit_list, dec_crit_list = lensModelExt.critical_curve_tiling(kwargs_lens, compute_window=_frame_size, start_scale=deltaPix, max_order=10)
        ra_caustic_list, dec_caustic_list = lensModel.ray_shooting(ra_crit_list, dec_crit_list, kwargs_lens)
        plot_line_set(ax, _coords, ra_caustic_list, dec_caustic_list, color='g')
        plot_line_set(ax, _coords, ra_crit_list, dec_crit_list, color='r')
    if point_source:
        # Local import to avoid a hard dependency unless requested.
        from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver
        solver = LensEquationSolver(lensModel)
        theta_x, theta_y = solver.image_position_from_source(sourcePos_x, sourcePos_y, kwargs_lens, min_distance=deltaPix, search_window=deltaPix * numPix)
        mag_images = lensModel.magnification(theta_x, theta_y, kwargs_lens)
        x_image, y_image = _coords.map_coord2pix(theta_x, theta_y)
        abc_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K']
        for i in range(len(x_image)):
            x_ = (x_image[i] + 0.5) * deltaPix
            y_ = (y_image[i] + 0.5) * deltaPix
            # Marker size scales with the log of the image magnification.
            ax.plot(x_, y_, 'dk', markersize=4 * (1 + np.log(np.abs(mag_images[i]))), alpha=0.5)
            ax.text(x_, y_, abc_list[i], fontsize=20, color='k')
        x_source, y_source = _coords.map_coord2pix(sourcePos_x, sourcePos_y)
        ax.plot((x_source + 0.5) * deltaPix, (y_source + 0.5) * deltaPix, '*k', markersize=10)
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    ax.autoscale(False)
    # image_position_plot(ax, _coords, self._kwargs_else)
    # source_position_plot(ax, self._coords, self._kwargs_source)
    return ax
|
def slice_naive(self, key):
    """Slice a data object based on its index, either by value (.loc) or
    position (.iloc).

    Args:
        key: Single index value, slice, tuple, or list of indices/positionals

    Returns:
        data: Slice of self
    """
    normalized = check_key(self, key)
    return type(self)(self.loc[normalized])
|
def is_union(etype) -> bool:
    """Determine whether *etype* is a ``typing.Union`` (including ``Optional``).

    Fixes the original, which could return ``None`` instead of a bool when
    ``__origin__`` existed but had no ``_name`` attribute, despite the
    ``-> bool`` annotation.

    :param etype: a type or typing construct to inspect
    :return: True if etype is a typing Union, False otherwise
    """
    origin = getattr(etype, '__origin__', None)
    # typing.Union[...] has __origin__ set to the Union special form,
    # whose internal _name is 'Union'.
    return origin is not None and getattr(origin, '_name', None) == 'Union'
|
def get_end_time(self):
    """Read the observation end time from the file metadata and store it
    on ``self._end_time`` as a :class:`datetime.datetime`."""
    attrs = self.filehandle.attributes()
    raw = self.parse_metadata_string(attrs['coremetadata'])
    self._end_time = datetime.strptime(raw, "%Y-%m-%dT%H:%M:%SZ")
|
def _encode_payload(data, headers=None):
    "Wrap data in an SCGI request."
    head = "CONTENT_LENGTH\0%d\0SCGI\x001\0" % len(data)
    if headers:
        head = head + _encode_headers(headers)
    return _encode_netstring(head) + data
|
def searchable_object_types(self):
    """List of (object_type, friendly name) tuples present in the index."""
    try:
        idx = self.index()
    except KeyError:
        # Index doesn't exist: the service was never started (can happen
        # during tests), so nothing is searchable.
        return []
    with idx.reader() as reader:
        present = sorted(set(reader.field_terms("object_type")))
        known = self.app_state.indexed_fqcn
        return [(fqcn, friendly_fqcn(fqcn)) for fqcn in present if fqcn in known]
|
def normalize_path(path):
    # type: (AnyStr) -> AnyStr
    """Return a case-normalized absolute variable-expanded path.

    :param str path: The non-normalized path
    :return: A normalized, expanded, case-normalized path
    :rtype: str
    """
    expanded = os.path.expanduser(str(path))
    expanded = os.path.expandvars(expanded)
    absolute = os.path.abspath(expanded)
    return os.path.normpath(os.path.normcase(absolute))
|
def CreateStorageReaderForFile(cls, path):
    """Creates a storage reader based on the file.

    Args:
      path (str): path to the storage file.

    Returns:
      StorageReader: a storage reader or None if the storage file cannot be
          opened or the storage format is not supported.
    """
    supported = sqlite_file.SQLiteStorageFile.CheckSupportedFormat(
        path, check_readable_only=True)
    if not supported:
        return None
    return sqlite_reader.SQLiteStorageFileReader(path)
|
def interactive():
    """Create an interactive command line tool.

    Wrapper for an interactive session for manual commands to be entered.
    Parses CLI options, starts a Commander on an asyncio event loop and
    runs until interrupted (Ctrl-C), then cancels pending tasks and
    closes the loop.
    """
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--device', default='/dev/ttyUSB0', help='Path to PLM device')
    parser.add_argument('-v', '--verbose', action='count', help='Set logging level to verbose')
    parser.add_argument('-l', '--logfile', default='', help='Log file name')
    parser.add_argument('--workdir', default='', help='Working directory for reading and saving ' 'device information.')
    args = parser.parse_args()
    loop = asyncio.get_event_loop()
    cmd = Commander(loop, args)
    cmd.start()
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        # Ctrl-C: close the PLM transport (if one was opened) before stopping.
        if cmd.tools.plm:
            if cmd.tools.plm.transport:  # _LOGGING.info('Closing the session')
                cmd.tools.plm.transport.close()
        loop.stop()
    # Drain still-pending tasks so the loop can close cleanly.
    # NOTE(review): asyncio.Task.all_tasks is deprecated since 3.7 and
    # removed in 3.9 -- modern code would call asyncio.all_tasks().
    pending = asyncio.Task.all_tasks(loop=loop)
    for task in pending:
        task.cancel()
        try:
            # Run each cancelled task to completion to let cleanup execute.
            loop.run_until_complete(task)
        except asyncio.CancelledError:
            pass
        except KeyboardInterrupt:
            # A second Ctrl-C during draining is ignored so shutdown finishes.
            pass
    loop.close()
|
def get_user(uid):
    """Get an user by the UID.

    :param str uid: UID to find (bytes are decoded as UTF-8)
    :return: the user, or None if the user does not exist
    :rtype: User object or None
    """
    if db is not None:
        # Database-backed lookup (db presumably is a Redis client -- hgetall
        # returns bytes keys/values; TODO confirm).
        try:
            uid = uid.decode('utf-8')
        except AttributeError:
            # already a str
            pass
        d = db.hgetall('user:{0}'.format(uid))
        if d:
            nd = {}
            # strings everywhere: normalize bytes keys/values to str, falling
            # back gracefully when either side is already decoded
            for k in d:
                try:
                    nd[k.decode('utf-8')] = d[k].decode('utf-8')
                except AttributeError:
                    try:
                        nd[k.decode('utf-8')] = d[k]
                    except AttributeError:
                        nd[k] = d[k]
            for p in PERMISSIONS:
                # permission flags are stored as the string '1'; coerce to bool
                nd[p] = nd.get(p) == '1'
            return User(uid=uid, **nd)
        else:
            return None
    else:
        # No database: fall back to the static user map in the app config.
        d = app.config['COIL_USERS'].get(uid)
        if d:
            return User(uid=uid, **d)
        else:
            return None
|
def listClients(self, *args, **kwargs):
    """List Clients

    Get a list of all clients.  With `prefix`, only clients for which
    it is a prefix of the clientId are returned.

    By default this end-point will try to return up to 1000 clients in one
    request.  But it **may return less, even none**.  It may also return a
    `continuationToken` even though there are no more results.  However,
    you can only be sure to have seen all results if you keep calling
    `listClients` with the last `continuationToken` until you get a result
    without a `continuationToken`.

    This method gives output: ``v1/list-clients-response.json#``

    This method is ``stable``
    """
    endpoint = self.funcinfo["listClients"]
    return self._makeApiCall(endpoint, *args, **kwargs)
|
def getLabel(self):
    """Retrieve metadata about the text.

    :rtype: Metadata
    :returns: Dictionary with label informations
    """
    reply = xmlparser(self.retriever.getLabel(urn=str(self.urn)))
    label_node = reply.xpath("//ti:reply/ti:label", namespaces=XPATH_NAMESPACES)[0]
    self._parse_request(label_node)
    return self.metadata
|
def auth_url(self, scope):
    """Gets the url a user needs to access to give up a user token"""
    query = {
        'response_type': 'code',
        'client_id': self.__client_id,
        'redirect_uri': self.__redirect_uri,
        'scope': scope,
    }
    if self.__state is not None:
        query['state'] = self.__state
    return settings.AUTH_ENDPOINT + '/authorize?' + urllib.urlencode(query)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.