signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def step(self, x):
    r"""Perform one overdamped Brownian-dynamics update.

    x_{t+1} = x_t - A * grad(x_t) + B * xi, with xi ~ N(0, I_dim).
    """
    drift = self.coeff_A * self.gradient(x)
    noise = self.coeff_B * np.random.normal(size=self.dim)
    return x - drift + noise
|
def execute_sql(server_context, schema_name, sql, container_path=None, max_rows=None, sort=None, offset=None, container_filter=None, save_in_session=None, parameters=None, required_version=None, timeout=_default_timeout):
    """Execute a LabKey SQL query against a LabKey server.

    :param server_context: a LabKey server context. See utils.create_server_context.
    :param schema_name: schema of table
    :param sql: string of LabKey SQL to execute
    :param container_path: LabKey container path if not already set in context
    :param container_filter: enumeration of the available container filters. See:
        https://www.labkey.org/download/clientapi_docs/javascript-api/symbols/LABKEY.Query.html#.containerFilter
    :param max_rows: max number of rows to return
    :param sort: comma separated list of column names to sort by
    :param offset: number of rows to offset results by
    :param save_in_session: save query result as a named view to the session
    :param parameters: parameter values to pass through to a parameterized query
    :param required_version: API version of response
    :param timeout: timeout of request in seconds (defaults to 30s)
    :return: the server response
    """
    url = server_context.build_url('query', 'executeSql.api', container_path=container_path)
    payload = {'schemaName': schema_name, 'sql': sql}
    # Optional scalar parameters, inserted in the same order as before.
    for key, value in (
        ('containerFilter', container_filter),
        ('maxRows', max_rows),
        ('offset', offset),
        ('query.sort', sort),
        ('saveInSession', save_in_session),
    ):
        if value is not None:
            payload[key] = value
    # Parameterized-query values are expanded to 'query.param.<name>'.
    if parameters is not None:
        for name, value in parameters.items():
            payload['query.param.' + name] = value
    if required_version is not None:
        payload['apiVersion'] = required_version
    return server_context.make_request(url, payload, timeout=timeout)
|
def _generate_request ( self , callname , request ) :
"""Generate a request object for delivery to the API"""
|
# Retrieve path from API class
schema = self . api . request_schema ( )
schema . context [ 'callname' ] = callname
return schema . dump ( request ) . data . get ( "payload" )
|
def _create_response_record ( self , response ) :
"""Creates record for lexicon API calls"""
|
record = dict ( )
record [ 'id' ] = response [ 'id' ]
record [ 'type' ] = response [ 'type' ]
record [ 'name' ] = self . _full_name ( response [ 'name' ] )
if 'content' in response :
record [ 'content' ] = response [ 'content' ] or ""
if 'ttl' in response :
record [ 'ttl' ] = response [ 'ttl' ]
if 'prio' in response :
record [ 'priority' ] = response [ 'prio' ]
return record
|
def _get_from_java_home ( self ) :
"""Retrieves the Java library path according to the JAVA _ HOME environment
variable
: return : The path to the JVM library , or None"""
|
# Get the environment variable
java_home = os . getenv ( "JAVA_HOME" )
if java_home and os . path . exists ( java_home ) : # Get the real installation path
java_home = os . path . realpath ( java_home )
# Cygwin has a bug in realpath
if not os . path . exists ( java_home ) :
java_home = os . getenv ( "JAVA_HOME" )
# Look for the library file
return self . find_libjvm ( java_home )
|
def changelog_file_option_validator(ctx, param, value):
    """Check that the given file path exists in the current working directory.

    Returns a :class:`~pathlib.Path` object. If the file does not exist,
    aborts via ``ctx.fail`` (which raises :class:`~click.UsageError`).
    """
    path = Path(value)
    if not path.exists():
        filename = click.style(path.name, fg="blue", bold=True)
        # Bug fix: the styled filename was computed but never interpolated
        # into the message (it contained a stray placeholder instead).
        ctx.fail("\n" f" {x_mark} Unable to find {filename}\n" ' Run "$ brau init" to create one')
    return path
|
def search_worker(self, regex, directory_path, names, binary=False, callback=None):
    """Build a DirectoryResult for *regex* over the given file *names*.

    Files that cannot be read (IOError) are skipped silently. When
    *callback* is given, it is invoked with the populated result.
    Exits the process on Ctrl-C.
    """
    try:
        result = DirectoryResult(directory_path)

        def find_matches(name):
            # Record every match of `regex` in the file's contents.
            full_path = path.join(directory_path, name)
            file_contents = get_file_contents(full_path, binary)
            start = 0
            match = regex.search(file_contents, start)
            while match:
                result.put(name, file_contents, match)
                # Advance past the match; bump by one on a zero-width match
                # so the scan cannot loop forever at the same position.
                start = match.end() if match.end() > start else start + 1
                match = regex.search(file_contents, start)

        for name in names:
            try:
                find_matches(name)
            except IOError:
                # Unreadable file: skip, keep scanning the rest.
                pass
        if callback:
            callback(result)
    except KeyboardInterrupt:
        # Fixed Python-2-only `except KeyboardInterrupt, e` syntax; the
        # bound exception was unused anyway.
        exit(1)
|
def indexbox(msg="Shall I continue?", title=" ", choices=("Yes", "No"), image=None):
    """Display a buttonbox with the specified choices.

    Return the index of the choice selected.
    """
    reply = buttonbox(msg=msg, choices=choices, title=title, image=image)
    for position, choice in enumerate(choices):
        if reply == choice:
            return position
    raise AssertionError("There is a program logic error in the EasyGui code for indexbox.")
|
def multi_p_run(tot_num, _func, worker, params, n_process):
    """Run _func with multi-process using params.

    The index range [0, tot_num) is partitioned into up to n_process
    (start, end) slices; one daemon process runs _func per slice and each
    process is expected to push exactly one item onto the shared queue.
    Returns the list of collected results, or -1 on Ctrl-C.
    """
    from multiprocessing import Process, Queue
    out_q = Queue()
    procs = []
    # split_seq partitions the indices; each entry is a (start, end) pair.
    split_num = split_seq(list(range(0, tot_num)), n_process)
    print(tot_num, ">>", split_num)
    split_len = len(split_num)
    # Fewer slices than requested workers: shrink the pool to match.
    if n_process > split_len:
        n_process = split_len
    for i in range(n_process):
        _p = Process(target=_func, args=(worker, split_num[i][0], split_num[i][1], params, out_q))
        _p.daemon = True
        procs.append(_p)
        _p.start()
    try:
        # Collect one result per process BEFORE joining, so a full queue
        # cannot deadlock the children on exit.
        result = []
        for i in range(n_process):
            result.append(out_q.get())
        for i in procs:
            i.join()
    except KeyboardInterrupt:
        print('Killing all the children in the pool.')
        for i in procs:
            i.terminate()
            i.join()
        return -1
    # Drain any leftover items (diagnostic only; normally empty).
    while not out_q.empty():
        print(out_q.get(block=False))
    return result
|
def present(name, level, devices, **kwargs):
    '''Verify that the raid is present
    .. versionchanged:: 2014.7.0
    name
        The name of raid device to be created
    level
        The RAID level to use when creating the raid.
    devices
        A list of devices used to build the array.
    kwargs
        Optional arguments to be passed to mdadm.
    Example:
    .. code-block:: yaml
        /dev/md0:
          raid.present:
            - level: 5
            - devices:
              - /dev/xvdd
              - /dev/xvde
              - /dev/xvdf
            - chunk: 256
            - run: True'''
    ret = {'changes': {}, 'comment': '', 'name': name, 'result': True}
    # Device exists
    raids = __salt__['raid.list']()
    present = raids.get(name)
    # Decide whether to create or assemble
    missing = []      # devices listed as 'missing' or not accessible as files
    uuid_dict = {}    # MD_UUID -> devices that already carry a RAID superblock
    new_devices = []  # accessible devices without any RAID superblock
    for dev in devices:
        if dev == 'missing' or not __salt__['file.access'](dev, 'f'):
            missing.append(dev)
            continue
        superblock = __salt__['raid.examine'](dev, quiet=True)
        if 'MD_UUID' in superblock:
            uuid = superblock['MD_UUID']
            if uuid not in uuid_dict:
                uuid_dict[uuid] = []
            uuid_dict[uuid].append(dev)
        else:
            new_devices.append(dev)
    # Constituents from more than one existing array cannot be combined.
    if len(uuid_dict) > 1:
        ret['comment'] = 'Devices are a mix of RAID constituents with multiple MD_UUIDs: {0}.'.format(sorted(uuid_dict.keys()))
        ret['result'] = False
        return ret
    elif len(uuid_dict) == 1:
        uuid = list(uuid_dict.keys())[0]
        # Superblock UUID must match the already-present array, if any.
        if present and present['uuid'] != uuid:
            ret['comment'] = 'Devices MD_UUIDs: {0} differs from present RAID uuid {1}.'.format(uuid, present['uuid'])
            ret['result'] = False
            return ret
        devices_with_superblock = uuid_dict[uuid]
    else:
        devices_with_superblock = []
    if present:
        do_assemble = False
        do_create = False
    elif devices_with_superblock:
        # Existing constituents found: re-assemble rather than create.
        do_assemble = True
        do_create = False
        verb = 'assembled'
    else:
        if not new_devices:
            ret['comment'] = 'All devices are missing: {0}.'.format(missing)
            ret['result'] = False
            return ret
        do_assemble = False
        do_create = True
        verb = 'created'
    # If running with test use the test_mode with create or assemble
    if __opts__['test']:
        if do_assemble:
            res = __salt__['raid.assemble'](name, devices_with_superblock, test_mode=True, **kwargs)
        elif do_create:
            res = __salt__['raid.create'](name, level, new_devices + ['missing'] * len(missing), test_mode=True, **kwargs)
        if present:
            ret['comment'] = 'Raid {0} already present.'.format(name)
        if do_assemble or do_create:
            ret['comment'] = 'Raid will be {0} with: {1}'.format(verb, res)
            ret['result'] = None
        if (do_assemble or present) and new_devices:
            ret['comment'] += ' New devices will be added: {0}'.format(new_devices)
            ret['result'] = None
        if missing:
            ret['comment'] += ' Missing devices: {0}'.format(missing)
        return ret
    # Attempt to create or assemble the array
    if do_assemble:
        __salt__['raid.assemble'](name, devices_with_superblock, **kwargs)
    elif do_create:
        __salt__['raid.create'](name, level, new_devices + ['missing'] * len(missing), **kwargs)
    if not present:
        # Re-list to confirm the array actually came into existence.
        raids = __salt__['raid.list']()
        changes = raids.get(name)
        if changes:
            ret['comment'] = 'Raid {0} {1}.'.format(name, verb)
            ret['changes'] = changes
            # Saving config
            __salt__['raid.save_config']()
        else:
            ret['comment'] = 'Raid {0} failed to be {1}.'.format(name, verb)
            ret['result'] = False
    else:
        ret['comment'] = 'Raid {0} already present.'.format(name)
    # Hot-add brand-new devices to an assembled or already-present array.
    if (do_assemble or present) and new_devices and ret['result']:
        for d in new_devices:
            res = __salt__['raid.add'](name, d)
            if not res:
                ret['comment'] += ' Unable to add {0} to {1}.\n'.format(d, name)
                ret['result'] = False
            else:
                ret['comment'] += ' Added new device {0} to {1}.\n'.format(d, name)
        if ret['result']:
            ret['changes']['added'] = new_devices
    if missing:
        ret['comment'] += ' Missing devices: {0}'.format(missing)
    return ret
|
def merge_equal_neighbors(self):
    """Merge neighboring segments that have the same speaker."""
    # Positional column index of the 'length' field in the segments frame
    # -- assumes a fixed column layout; TODO confirm against self.segs.
    IDX_LENGTH = 3
    merged = self.segs.copy()
    current_start = 0  # start time of the run currently being merged
    j = 0              # write index into `merged`
    seg = self.segs.iloc[0]
    for i in range(1, self.num_segments):
        seg = self.segs.iloc[i]
        last = self.segs.iloc[i - 1]
        if seg.speaker == last.speaker:
            # Same speaker: stretch the current merged segment so it ends
            # where this segment ends (length = segment end - run start).
            merged.iat[j, IDX_LENGTH] = seg.start + seg.length - current_start
        else:
            # Speaker changed: begin a new merged segment at this one.
            j += 1
            merged.iloc[j] = seg
            current_start = seg.start
    # Keep only the rows actually written, then restore time order.
    merged = merged.iloc[:(j + 1)]
    merged.sort_values('start', inplace=True)
    return self.update_segs(merged)
|
def _handle_start_node ( self , attrs ) :
"""Handle opening node element
: param attrs : Attributes of the element
: type attrs : Dict"""
|
self . _curr = { 'attributes' : dict ( attrs ) , 'lat' : None , 'lon' : None , 'node_id' : None , 'tags' : { } }
if attrs . get ( 'id' , None ) is not None :
self . _curr [ 'node_id' ] = int ( attrs [ 'id' ] )
del self . _curr [ 'attributes' ] [ 'id' ]
if attrs . get ( 'lat' , None ) is not None :
self . _curr [ 'lat' ] = Decimal ( attrs [ 'lat' ] )
del self . _curr [ 'attributes' ] [ 'lat' ]
if attrs . get ( 'lon' , None ) is not None :
self . _curr [ 'lon' ] = Decimal ( attrs [ 'lon' ] )
del self . _curr [ 'attributes' ] [ 'lon' ]
|
def _from_frame ( cls , frame ) :
"Copy constructor"
|
assert frame . _framespec == cls . _framespec
new = cls ( flags = frame . flags , frameno = frame . frameno )
for spec in cls . _framespec :
setattr ( new , spec . name , getattr ( frame , spec . name , None ) )
return new
|
def delete_where_user_id(cls, user_id):
    """Delete the record matching *user_id*.

    Returns None when no matching record exists, True after a
    successful delete. (The old docstring said "delete by email",
    which did not match the code.)
    """
    result = cls.where_user_id(user_id)
    if result is None:
        return None
    result.delete()
    return True
|
def parse_GPL(filepath, entry_name=None, partial=None):
    """Parse GPL entry from SOFT file.

    Args:
        filepath (:obj:`str` or :obj:`Iterable`): Path to file with 1 GPL entry
            or list of lines representing GPL from GSE file.
        entry_name (:obj:`str`, optional): Name of the entry. By default it is
            inferred from the data.
        partial (:obj:`iterable`, optional): A list of accession IDs of GSMs
            to be partially extracted from GPL; works only if a file/accession
            is a GPL.

    Returns:
        :obj:`GEOparse.GPL`: A GPL object.
    """
    gsms = {}
    gses = {}
    gpl_soft = []      # raw SOFT lines belonging to the platform entry
    has_table = False  # set once a data-table section is detected
    gpl_name = entry_name
    database = None
    if isinstance(filepath, str):
        with utils.smart_open(filepath) as soft:
            # groupby alternates between runs of "^ENTRY" header lines and
            # the data lines that follow each header.
            groupper = groupby(soft, lambda x: x.startswith("^"))
            for is_new_entry, group in groupper:
                if is_new_entry:
                    entry_type, entry_name = __parse_entry(next(group))
                    logger.debug("%s: %s" % (entry_type.upper(), entry_name))
                    if entry_type == "SERIES":
                        is_data, data_group = next(groupper)
                        gse_metadata = parse_metadata(data_group)
                        gses[entry_name] = GSE(name=entry_name, metadata=gse_metadata)
                    elif entry_type == "SAMPLE":
                        # Skip samples not requested via `partial`.
                        if partial and entry_name not in partial:
                            continue
                        is_data, data_group = next(groupper)
                        gsms[entry_name] = parse_GSM(data_group, entry_name)
                    elif entry_type == "DATABASE":
                        is_data, data_group = next(groupper)
                        database_metadata = parse_metadata(data_group)
                        database = GEODatabase(name=entry_name, metadata=database_metadata)
                    elif entry_type == "PLATFORM" or entry_type == "Annotation":
                        gpl_name = entry_name
                        is_data, data_group = next(groupper)
                        # NOTE(review): this expression is truthy unless
                        # gpl_name == ""; presumably it is meant to flag a
                        # missing platform name -- confirm intent.
                        has_gpl_name = gpl_name or gpl_name is None
                        for line in data_group:
                            if ("_table_begin" in line or not line.startswith(("^", "!", "#"))):
                                has_table = True
                            if not has_gpl_name:
                                # Fall back to the !Annotation_platform field.
                                if match("!Annotation_platform\s*=\s*", line):
                                    gpl_name = split("\s*=\s*", line)[-1].strip()
                                    has_gpl_name = True
                            gpl_soft.append(line)
                    else:
                        raise RuntimeError("Cannot parse {etype}. Unknown for GPL.".format(etype=entry_type))
    else:
        # `filepath` is an iterable of lines (a GPL embedded in a GSE file).
        for line in filepath:
            if "_table_begin" in line or (not line.startswith(("^", "!", "#"))):
                has_table = True
            gpl_soft.append(line.rstrip())
    columns = None
    try:
        columns = parse_columns(gpl_soft)
    except Exception:
        # Column descriptions are best-effort; a GPL without them is valid.
        pass
    metadata = parse_metadata(gpl_soft)
    if has_table:
        table_data = parse_table_data(gpl_soft)
    else:
        table_data = DataFrame()
    gpl = GPL(name=gpl_name, gses=gses, gsms=gsms, table=table_data, metadata=metadata, columns=columns, database=database)
    # link samples to series, if these were present in the GPL soft file
    for gse_id, gse in gpl.gses.items():
        for gsm_id in gse.metadata.get("sample_id", []):
            if gsm_id in gpl.gsms:
                gpl.gses[gse_id].gsms[gsm_id] = gpl.gsms[gsm_id]
    return gpl
|
def download(link, outdir='.', chunk_size=4096):
    '''This is the Main function, which downloads a given link
    and saves on outdir (default = current directory).

    Downloads to a temp file first, shows progress (bar/size/unknown
    depending on Content-Length and console width), then renames to the
    final filename, optionally validating a Content-MD5 header.
    Returns the path of the downloaded file.'''
    url = None
    fh = None
    eta = 'unknown '
    bytes_so_far = 0
    filename = filename_from_url(link) or "."
    cj = cjar.CookieJar()
    # get filename for temp file in current directory
    (fd_tmp, tmpfile) = tempfile.mkstemp(".tmp", prefix=filename + ".", dir=outdir)
    # mkstemp opens the fd; we only wanted a unique name, so release both.
    os.close(fd_tmp)
    os.unlink(tmpfile)
    try:
        opener = ulib.build_opener(ulib.HTTPCookieProcessor(cj))
        url = opener.open(link)
        fh = open(tmpfile, mode='wb')
        headers = url.info()
        try:
            total_size = int(headers['Content-Length'])
        except (ValueError, KeyError, TypeError):
            total_size = 'unknown'
        try:
            md5_header = headers['Content-MD5']
        except (ValueError, KeyError, TypeError):
            md5_header = None
        # Define which callback we're gonna use
        if total_size != 'unknown':
            if CONSOLE_WIDTH > 57:
                reporthook = report_bar
            else:
                reporthook = report_onlysize
        else:
            reporthook = report_unknown
        # Below are the registers to calculate network transfer rate
        time_register = time()
        speed = 0.0
        speed_list = []
        bytes_register = 0.0
        eta = 'unknown '
        # Loop that reads in chunks, calculates speed and does the callback to
        # print the progress
        while True:
            chunk = url.read(chunk_size)
            # Update Download Speed every 1 second
            if time() - time_register > 0.5:
                speed = (bytes_so_far - bytes_register) / (time() - time_register)
                speed_list.append(speed)
                # Set register properly for future use
                time_register = time()
                bytes_register = bytes_so_far
                # Estimative of remaining download time, averaged over the
                # last 3 speed samples.
                if total_size != 'unknown' and len(speed_list) == 3:
                    speed_mean = sum(speed_list) / 3
                    eta_sec = int((total_size - bytes_so_far) / speed_mean)
                    eta = str(datetime.timedelta(seconds=eta_sec))
                    speed_list = []
            bytes_so_far += len(chunk)
            if not chunk:
                sys.stdout.write('\n')
                break
            fh.write(chunk)
            reporthook(bytes_so_far, total_size, speed, eta)
    except KeyboardInterrupt:
        print('\n\nCtrl + C: Download aborted by user')
        print('Partial downloaded file:\n{0}'.format(os.path.abspath(tmpfile)))
        sys.exit(1)
    finally:
        if url:
            url.close()
        if fh:
            fh.close()
    # Prefer the filename advertised in the response headers, if any.
    filenamealt = filename_from_headers(headers)
    if filenamealt:
        filename = filenamealt
    # add numeric '(x)' suffix if filename already exists
    if os.path.exists(os.path.join(outdir, filename)):
        filename = filename_fix_existing(filename, outdir)
    filename = os.path.join(outdir, filename)
    shutil.move(tmpfile, filename)
    # Check if sizes matches
    if total_size != 'unknown' and total_size != bytes_so_far:
        print('\n\nWARNING!! Downloaded file size mismatches... Probably corrupted...')
    # Check md5 if it was in html header
    if md5_header:
        print('\nValidating MD5 checksum...')
        if md5_header == md5sum(filename):
            print('MD5 checksum passed!')
        else:
            print('MD5 checksum do NOT passed!!!')
    return filename
|
def _get_criteria_matching_disks(logical_disk, physical_drives):
    """Find the physical drives matching the criteria of a logical disk.

    :param logical_disk: the logical disk dictionary from the raid config
    :param physical_drives: the physical drives to consider
    :returns: a list of physical drives that satisfy every criterion
    """
    # Only criteria actually specified on the logical disk are checked.
    criteria = [c for c in FILTER_CRITERIA if c in logical_disk]
    return [
        drive for drive in physical_drives
        if all(logical_disk.get(c) == getattr(drive, c) for c in criteria)
    ]
|
def add_optionals(self, optionals_in, optionals_out):
    """Add optional inputs and outputs to the model spec.

    Parameters
    ----------
    optionals_in: [str]
        List of inputs that are optionals.
    optionals_out: [str]
        List of outputs that are optionals.

    See Also
    --------
    set_input, set_output
    """
    spec = self.spec
    if not (optionals_in or optionals_out):
        return
    # Assuming single sizes here: each entry is a (name, dimension) pair.
    input_features = [(str(name), datatypes.Array(dim)) for name, dim in optionals_in]
    output_features = [(str(name), datatypes.Array(dim)) for name, dim in optionals_out]
    len_before_in = len(spec.description.input)
    len_before_out = len(spec.description.output)
    # This appends to the existing model interface.
    set_transform_interface_params(spec, input_features, output_features, True)
    # Add types for any extra hidden inputs/outputs that were appended.
    for idx in range(len_before_in, len(spec.description.input)):
        spec.description.input[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
    for idx in range(len_before_out, len(spec.description.output)):
        spec.description.output[idx].type.multiArrayType.dataType = _Model_pb2.ArrayFeatureType.DOUBLE
|
def _build(self, inputs, prev_state):
    """Connects the GRU module into the graph.

    If this is not the first time the module has been connected to the
    graph, the Tensors provided as inputs and state must have the same
    final dimension as before, so the existing variables are the correct
    size. The batch size may differ for each connection.

    Args:
        inputs: Tensor of size `[batch_size, input_size]`.
        prev_state: Tensor of size `[batch_size, hidden_size]`.

    Returns:
        A tuple (output, next_state), both Tensors of size
        `[batch_size, hidden_size]` (for a GRU they are the same Tensor).

    Raises:
        ValueError: If connecting the module into the graph any time after
            the first time, and the inferred size of the inputs does not
            match previous invocations.
    """
    input_size = inputs.get_shape()[1]
    weight_shape = (input_size, self._hidden_size)
    u_shape = (self._hidden_size, self._hidden_size)
    bias_shape = (self._hidden_size,)
    dtype = inputs.dtype

    def _gate_variable(name, shape):
        # All nine gate parameters share the same variable configuration;
        # factored out of the previously triplicated get_variable calls.
        return tf.get_variable(name, shape, dtype=dtype, initializer=self._initializers.get(name), partitioner=self._partitioners.get(name), regularizer=self._regularizers.get(name))

    # Update gate z.
    self._wz = _gate_variable(GRU.WZ, weight_shape)
    self._uz = _gate_variable(GRU.UZ, u_shape)
    self._bz = _gate_variable(GRU.BZ, bias_shape)
    z = tf.sigmoid(tf.matmul(inputs, self._wz) + tf.matmul(prev_state, self._uz) + self._bz)
    # Reset gate r.
    self._wr = _gate_variable(GRU.WR, weight_shape)
    self._ur = _gate_variable(GRU.UR, u_shape)
    self._br = _gate_variable(GRU.BR, bias_shape)
    r = tf.sigmoid(tf.matmul(inputs, self._wr) + tf.matmul(prev_state, self._ur) + self._br)
    # Candidate state, computed against the reset-gated previous state.
    self._wh = _gate_variable(GRU.WH, weight_shape)
    self._uh = _gate_variable(GRU.UH, u_shape)
    self._bh = _gate_variable(GRU.BH, bias_shape)
    h_twiddle = tf.tanh(tf.matmul(inputs, self._wh) + tf.matmul(r * prev_state, self._uh) + self._bh)
    state = (1 - z) * prev_state + z * h_twiddle
    return state, state
|
def authenticate(self, username=None, password=None, **kwargs):
    """"username" being passed is really email address and being compared to as such."""
    try:
        user = User.objects.get(email=username)
        # Wrong password falls through to the implicit None below.
        return user if user.check_password(password) else None
    except (User.DoesNotExist, User.MultipleObjectsReturned):
        logging.warning('Unsuccessful login attempt using username/email: {0}'.format(username))
    return None
|
def _format_data(self, data, charset):
    """Serialize *data* into an XML document string."""
    if data is None or data == '':
        return u''
    buffer = StringIO.StringIO()
    writer = SimplerXMLGenerator(buffer, charset)
    writer.startDocument()
    writer.startElement(self._root_element_name(), {})
    self._to_xml(writer, data)
    writer.endElement(self._root_element_name())
    writer.endDocument()
    return buffer.getvalue()
|
def match_rules(tree, rules, fun=None, multi=False):
    """Matches a Tree structure with the given query rules.

    Query rules are represented as a dictionary of template to action.
    Action is either a function, or a dictionary of subtemplate parameter
    to rules::

        rules = {'template': {'key': rules}}
              | {'template': {}}

    Args:
        tree (Tree): Parsed tree structure
        rules (dict): A dictionary of query rules
        fun (function): Function to call with context (set to None if you
            want to return the context)
        multi (Bool): If True, returns all matched contexts, else returns
            the first matched context

    Returns:
        Contexts from matched rules, or the result(s) of calling *fun*.
    """
    if multi:
        context = match_rules_context_multi(tree, rules)
    else:
        context = match_rules_context(tree, rules)
    if not context:
        return None
    if not fun:
        return context

    arg_names = fun.__code__.co_varnames

    def _call_with(ctx):
        # Pass through only the context keys the callback actually names;
        # this was previously duplicated in the multi and single branches.
        kwargs = {arg: ctx[arg] for arg in arg_names if arg in ctx}
        return fun(**kwargs)

    if multi:
        return [_call_with(c) for c in context]
    return _call_with(context)
|
def download(url, params=None, accept="xml", **kwds):
    """Helper function to download a file and return its content.

    Parameters
    ----------
    url : string
        The URL to be parsed.
    params : dict (optional)
        Dictionary containing query parameters. For required keys
        and accepted values see e.g.
        https://api.elsevier.com/documentation/AuthorRetrievalAPI.wadl
    accept : str (optional, default=xml)
        mime type of the file to be downloaded. Accepted values are json,
        atom+xml, xml.
    kwds : key-value pairings, optional
        Keywords passed on as query parameters. Must contain fields
        and values specified in the respective API specification.

    Raises
    ------
    ScopusHtmlError
        If the status of the response is not ok.
    ValueError
        If the accept parameter is not one of the accepted values.

    Returns
    -------
    resp : byte-like object
        The content of the file, which needs to be serialized.
    """
    # Value check
    accepted = ("json", "xml", "atom+xml")
    if accept.lower() not in accepted:
        raise ValueError('accept parameter must be one of ' + ', '.join(accepted))
    # Get credentials
    key = config.get('Authentication', 'APIKey')
    header = {'X-ELS-APIKey': key}
    if config.has_option('Authentication', 'InstToken'):
        token = config.get('Authentication', 'InstToken')
        header.update({'X-ELS-APIKey': key, 'X-ELS-Insttoken': token})
    header.update({'Accept': 'application/{}'.format(accept)})
    # Perform request.  Bug fix: `params` defaults to None, so calling
    # params.update(**kwds) crashed with AttributeError whenever the
    # caller omitted it; start from an empty dict in that case.
    if params is None:
        params = {}
    params.update(**kwds)
    resp = requests.get(url, headers=header, params=params)
    # Raise a scopus-specific error when the status code is registered
    try:
        reason = resp.reason.upper() + " for url: " + url
        raise errors[resp.status_code](reason)
    except KeyError:  # Exception not specified in scopus
        resp.raise_for_status()
    # Will pass when everything is ok
    return resp
|
def removeClass(self, *classes: str) -> None:
    """[Not Standard] Remove classes from this node."""
    removable = []
    for cls in classes:
        if cls in self.classList:
            removable.append(cls)
        elif cls in self.get_class_list():
            # Present only at the class level, not on this instance.
            logger.warning('tried to remove class-level class: {}'.format(cls))
        else:
            logger.warning('tried to remove non-existing class: {}'.format(cls))
    self.classList.remove(*removable)
|
def Run(self, args):
    """Stream a file over a TCP socket, AES-CBC encrypted block by block."""
    # Open the file.
    fd = vfs.VFSOpen(args.pathspec, progress_callback=self.Progress)
    # Resolve the requested address family.
    if args.address_family == rdf_client_network.NetworkAddress.Family.INET:
        family = socket.AF_INET
    elif args.address_family == rdf_client_network.NetworkAddress.Family.INET6:
        family = socket.AF_INET6
    else:
        raise RuntimeError("Socket address family not supported.")
    s = socket.socket(family, socket.SOCK_STREAM)
    try:
        s.connect((args.host, args.port))
    except socket.error as e:
        # Normalize socket errors into RuntimeError for the caller.
        raise RuntimeError(str(e))
    cipher = rdf_crypto.AES128CBCCipher(args.key, args.iv)
    streaming_encryptor = rdf_crypto.StreamingCBCEncryptor(cipher)
    # Read/encrypt/send in BLOCK_SIZE chunks until EOF.
    while True:
        data = fd.read(self.BLOCK_SIZE)
        if not data:
            break
        self.Send(s, streaming_encryptor.Update(data))
        # Send heartbeats for long files.
        self.Progress()
    # Flush any remaining buffered ciphertext (including padding).
    self.Send(s, streaming_encryptor.Finalize())
    s.close()
    self.SendReply(fd.Stat())
|
def get_obj(self, objpath, metahash, dst_path):
    """Get an object from the cache and write it to dst_path.

    Args:
        objpath: filename relative to buildroot
            (example: mini-boot/blahblah/somefile.bin)
        metahash: metahash. See targets/base.py
        dst_path: Absolute path where the file should be written.

    Raises:
        CacheMiss: if the item is not in the cache
    """
    incachepath = self.path_in_cache(objpath, metahash)
    if not os.path.exists(incachepath):
        raise CacheMiss('%s not in cache.' % incachepath)
    log.debug('Cache hit! %s~%s', objpath, metahash.hexdigest())
    dst_dir = os.path.dirname(dst_path)
    if not os.path.exists(dst_dir):
        os.makedirs(dst_dir)
    # Hard-link the cached file into place instead of copying it.
    os.link(incachepath, dst_path)
|
def eeg_to_df(eeg, index=None, include="all", exclude=None, hemisphere="both", central=True):
    """Convert an mne Raw or Epochs object to a dataframe or dict of dataframes.

    For Epochs input, returns a dict mapping each epoch key (taken from
    *index*, or 0..n-1 by default) to a DataFrame of shape (times x
    selected channels); the electrode selection arguments are forwarded
    to eeg_select_electrodes. For any other input (assumed Raw-like),
    returns a single DataFrame of all channels indexed by time.

    NOTE(review): the selection arguments are not applied in the Raw
    branch -- confirm whether that asymmetry is intended.
    """
    if isinstance(eeg, mne.Epochs):
        data = {}
        if index is None:
            index = range(len(eeg))
        for epoch_index, epoch in zip(index, eeg.get_data()):
            # get_data() yields (channels, times); transpose to times-major.
            epoch = pd.DataFrame(epoch.T)
            epoch.columns = eeg.ch_names
            epoch.index = eeg.times
            selection = eeg_select_electrodes(eeg, include=include, exclude=exclude, hemisphere=hemisphere, central=central)
            data[epoch_index] = epoch[selection]
    else:  # it might be a Raw object
        data = eeg.get_data().T
        data = pd.DataFrame(data)
        data.columns = eeg.ch_names
        data.index = eeg.times
    return (data)
|
def search_for_vcs(self, dependency):  # type: (VCSDependency) -> List[Package]
    """Search for the specifications that match the given VCS dependency.

    Basically, we clone the repository in a temporary directory and get
    the information we need by checking out the specified reference.

    Raises:
        ValueError: if the dependency uses a VCS other than git.
    """
    if dependency.vcs != "git":
        raise ValueError("Unsupported VCS dependency {}".format(dependency.vcs))
    tmp_dir = Path(mkdtemp(prefix="pypoetry-git-{}".format(dependency.name)))
    # The no-op `except Exception: raise` was removed; try/finally alone
    # already guarantees cleanup while letting exceptions propagate.
    try:
        git = Git()
        git.clone(dependency.source, tmp_dir)
        git.checkout(dependency.reference, tmp_dir)
        revision = git.rev_parse(dependency.reference, tmp_dir).strip()
        # For tags and exact revs, record the reference itself rather
        # than the resolved commit hash.
        if dependency.tag or dependency.rev:
            revision = dependency.reference
        directory_dependency = DirectoryDependency(dependency.name, tmp_dir, category=dependency.category, optional=dependency.is_optional())
        for extra in dependency.extras:
            directory_dependency.extras.append(extra)
        # NOTE(review): assumes search_for_directory always returns at
        # least one package -- an empty result would raise IndexError.
        package = self.search_for_directory(directory_dependency)[0]
        package.source_type = "git"
        package.source_url = dependency.source
        package.source_reference = revision
    finally:
        safe_rmtree(str(tmp_dir))
    return [package]
|
def occurs_in_type ( v , type2 ) :
    """Checks whether a type variable occurs in a type expression.

    Note: Must be called with v pre-pruned

    Args:
        v: The TypeVariable to be tested for
        type2: The type in which to search

    Returns:
        True if v occurs in type2, otherwise False
    """
    pruned = prune ( type2 )
    if pruned == v :
        return True
    # Only operators carry sub-types worth recursing into.
    return isinstance ( pruned , TypeOperator ) and occurs_in ( v , pruned . types )
|
def expectScreen ( self , filename , maxrms = 0 ) :
    """Wait until the display matches a target image.

    :param filename: an image file to read and compare against
    :param maxrms: the maximum root mean square between histograms of the
        screen and target image
    :return: whatever ``_expectFramebuffer`` returns for the full-screen
        comparison anchored at (0, 0).
    """
    log . debug ( 'expectScreen %s' , filename )
    return self . _expectFramebuffer ( filename , 0 , 0 , maxrms )
|
def intermediate_cpfs ( self ) -> List [ CPF ] :
    '''Returns list of intermediate-fluent CPFs in level order.'''
    _ , all_cpfs = self . cpfs
    fluents = self . intermediate_fluents
    # Keep only CPFs that correspond to intermediate fluents, then order
    # them by (level, name).
    selected = [ cpf for cpf in all_cpfs if cpf . name in fluents ]
    selected . sort ( key = lambda cpf : ( fluents [ cpf . name ] . level , cpf . name ) )
    return selected
|
def _json_to_subscriptions ( response_body ) :
    """Returns a list of Subscription objects"""
    data = json . loads ( response_body )
    netid = data . get ( 'uwNetID' )
    return [
        Subscription ( ) . from_json ( netid , entry )
        for entry in data . get ( "subscriptionList" , [ ] )
    ]
|
def validate ( self , value , validator ) :
    """Validates and returns the value.

    If the value does not validate against the schema, SchemaValidationError
    will be raised.

    :param value: A value to validate (usually a dict).
    :param validator: An instance of a jsonschema validator class, as
        created by Schema.get_validator().
    :returns: the passed value.
    :raises SchemaValidationError:
    :raises Exception:
    """
    try :
        validator . validate ( value )
    except Exception as e :
        logging . debug ( e , exc_info = e )
        if isinstance ( e , DoctorError ) :
            raise
        # Gather every validation error, keyed by the offending field
        # (errors without a path are filed under '_other').
        errors = { }
        for error in sorted ( validator . iter_errors ( value ) , key = lambda err : err . path ) :
            if error . path :
                key = error . path [ 0 ]
            else :
                key = '_other'
            errors [ key ] = error . args [ 0 ]
        raise SchemaValidationError ( e . args [ 0 ] , errors = errors )
    return value
|
def dbus_readBytesTwoFDs ( self , fd1 , fd2 , byte_count ) :
    """Reads byte_count from fd1 and fd2. Returns concatenation.

    :param fd1: first readable file descriptor (ownership is taken; closed here)
    :param fd2: second readable file descriptor (ownership is taken; closed here)
    :param byte_count: number of bytes to read from *each* descriptor
    :return: bytearray with up to 2 * byte_count bytes
    """
    result = bytearray ( )
    for fd in ( fd1 , fd2 ) :
        # Context manager guarantees the descriptor is closed even if
        # read() raises (previously a failed read leaked the fd).
        with os . fdopen ( fd , 'rb' ) as f :
            result . extend ( f . read ( byte_count ) )
    return result
|
def speziale_grun ( v , v0 , gamma0 , q0 , q1 ) :
    """calculate Gruneisen parameter for the Speziale equation

    :param v: unit-cell volume in A^3
    :param v0: unit-cell volume in A^3 at 1 bar
    :param gamma0: Gruneisen parameter at 1 bar
    :param q0: logarithmic derivative of Gruneisen parameter
    :param q1: logarithmic derivative of Gruneisen parameter
    :return: Gruneisen parameter
    """
    # Pick the exp() matching the inputs: uncertainty-aware unp.exp when any
    # argument carries uncertainties, plain numpy otherwise.
    exp = unp . exp if isuncertainties ( [ v , v0 , gamma0 , q0 , q1 ] ) else np . exp
    return gamma0 * exp ( q0 / q1 * ( ( v / v0 ) ** q1 - 1. ) )
|
def scrape_wikinews ( conn , project , articleset , query ) :
    """Scrape wikinews articles from the given query.

    @param conn: The AmcatAPI object
    @param project: The target project ID
    @param articleset: The target articleset ID
    @param query: The search string used on the wikinews search page
    """
    url = "http://en.wikinews.org/w/index.php?search={}&limit=50" . format ( query )
    logging . info ( url )
    # Pages are fetched and uploaded one search-result page at a time.
    for page in get_pages ( url ) :
        urls = get_article_urls ( page )
        arts = list ( get_articles ( urls ) )
        logging . info ( "Adding {} articles to set {}:{}" . format ( len ( arts ) , project , articleset ) )
        conn . create_articles ( project = project , articleset = articleset , json_data = arts )
|
def wait_for_n_keypresses ( self , key , n = 1 ) :
    """Waits till one key was pressed n times.

    :param key: the key to be pressed as defined by pygame. E.g.
        pygame.K_LEFT for the left arrow key
    :type key: int
    :param n: number of repetitions till the function returns
    :type n: int
    """
    sentinel = "key_consumed"

    def on_event ( event ) :
        # Report the sentinel only for KEYDOWN events of the watched key.
        if event . type == pygame . KEYDOWN and event . key == key :
            return sentinel
        return EventConsumerInfo . DONT_CARE

    remaining = n
    while remaining > 0 :
        if self . listen ( on_event ) == sentinel :
            remaining -= 1
|
def get_current_transport_info ( self ) :
    """Get the current playback state.

    Returns:
        dict: The following information about the
        speaker's playing state:

        * current_transport_state (``PLAYING``, ``TRANSITIONING``,
          ``PAUSED_PLAYBACK``, ``STOPPED``)
        * current_transport_status (OK, ?)
        * current_speed (1, ?)

    This allows us to know if speaker is playing or not. Don't know other
    states of CurrentTransportStatus and CurrentSpeed.
    """
    response = self . avTransport . GetTransportInfo ( [ ( 'InstanceID' , 0 ) , ] )
    # Build the result dict directly from the UPnP response fields.
    return {
        'current_transport_state' : response [ 'CurrentTransportState' ] ,
        'current_transport_status' : response [ 'CurrentTransportStatus' ] ,
        'current_transport_speed' : response [ 'CurrentSpeed' ] ,
    }
|
def max_interval_intersec ( S ) :
    """determine a value that is contained in a largest number of given intervals

    :param S: list of half open intervals
    :complexity: O(n log n), where n = len(S)
    """
    # Sweep-line over interval endpoints: +1 event at each opening,
    # -1 event at each closing. Sorting places a closing (-1) before an
    # opening (+1) at the same coordinate, which matches half-open intervals.
    events = [ ( lo , + 1 ) for lo , _ in S ]
    events += [ ( hi , - 1 ) for _ , hi in S ]
    events . sort ( )
    depth = 0
    best_count , best_point = 0 , None
    for point , delta in events :
        depth += delta
        if depth > best_count :
            best_count , best_point = depth , point
    return ( best_count , best_point )
|
def null_space ( M , k , k_skip = 1 , eigen_solver = 'arpack' , random_state = None , solver_kwds = None ) :
    """Find the null space of a matrix M: eigenvectors associated with 0 eigenvalues.

    Parameters
    ----------
    M : {array, matrix, sparse matrix, LinearOperator}
        Input covariance matrix: should be symmetric positive semi-definite
    k : integer
        Number of eigenvalues/vectors to return
    k_skip : integer, optional
        Number of low eigenvalues to skip.
    eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
        'auto':
            algorithm will attempt to choose the best method for input data
        'dense':
            use standard dense matrix operations for the eigenvalue decomposition.
            For this method, M must be an array or matrix type.
            This method should be avoided for large problems.
        'arpack':
            use arnoldi iteration in shift-invert mode. For this method,
            M may be a dense matrix, sparse matrix, or general linear operator.
            Warning: ARPACK can be unstable for some problems. It is best to
            try several random seeds in order to check results.
        'lobpcg':
            Locally Optimal Block Preconditioned Conjugate Gradient Method.
            A preconditioned eigensolver for large symmetric positive definite
            (SPD) generalized eigenproblems.
        'amg':
            AMG requires pyamg to be installed. It can be faster on very large,
            sparse problems, but may also lead to instabilities.
    random_state : numpy.RandomState or int, optional
        The generator or seed used to determine the starting vector for arpack
        iterations. Defaults to numpy.random.
    solver_kwds : any additional keyword arguments to pass to the selected eigen_solver

    Returns
    -------
    null_space : estimated k vectors of the null space
    error : estimated error (sum of eigenvalues)

    Notes
    -----
    For dense/arpack solver keywords see scipy.linalg.eigh / eig and
    scipy.sparse.linalg.eigsh / eigs. For lobpcg keywords see
    scipy.sparse.linalg.lobpcg; for amg see pyamg.aggregation.aggregation
    (the amg solver uses lobpcg and also accepts lobpcg keywords).
    """
    # Normalize/validate the requested solver against problem size.
    eigen_solver , solver_kwds = check_eigen_solver ( eigen_solver , solver_kwds , size = M . shape [ 0 ] , nvec = k + k_skip )
    random_state = check_random_state ( random_state )
    if eigen_solver == 'arpack' :
        # This matches the internal initial state used by ARPACK
        v0 = random_state . uniform ( - 1 , 1 , M . shape [ 0 ] )
        try :
            eigen_values , eigen_vectors = eigsh ( M , k + k_skip , sigma = 0.0 , v0 = v0 , ** ( solver_kwds or { } ) )
        except RuntimeError as msg :
            raise ValueError ( "Error in determining null-space with ARPACK. " "Error message: '%s'. " "Note that method='arpack' can fail when the " "weight matrix is singular or otherwise " "ill-behaved. method='dense' is recommended. " "See online documentation for more information." % msg )
        # Drop the k_skip smallest eigenpairs; the reported error is the sum
        # of the retained (near-zero) eigenvalues.
        return eigen_vectors [ : , k_skip : ] , np . sum ( eigen_values [ k_skip : ] )
    elif eigen_solver == 'dense' :
        if hasattr ( M , 'toarray' ) :
            # eigh needs a dense array.
            M = M . toarray ( )
        eigen_values , eigen_vectors = eigh ( M , eigvals = ( 0 , k + k_skip ) , overwrite_a = True , ** ( solver_kwds or { } ) )
        # Reorder by eigenvalue magnitude so near-zero eigenvalues come first.
        index = np . argsort ( np . abs ( eigen_values ) )
        eigen_vectors = eigen_vectors [ : , index ]
        eigen_values = eigen_values [ index ]
        return eigen_vectors [ : , k_skip : k + 1 ] , np . sum ( eigen_values [ k_skip : k + 1 ] )
        # eigen_values, eigen_vectors = eigh(
        #     M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
        # index = np.argsort(np.abs(eigen_values))
        # return eigen_vectors[:, index], np.sum(eigen_values)
    elif ( eigen_solver == 'amg' or eigen_solver == 'lobpcg' ) :
        # M should be positive semi-definite. Add 1 to make it pos. def.
        try :
            M = sparse . identity ( M . shape [ 0 ] ) + M
            n_components = min ( k + k_skip + 10 , M . shape [ 0 ] )
            eigen_values , eigen_vectors = eigen_decomposition ( M , n_components , eigen_solver = eigen_solver , drop_first = False , largest = False , random_state = random_state , solver_kwds = solver_kwds )
            # Undo the +1 identity shift applied above.
            eigen_values = eigen_values - 1
            index = np . argsort ( np . abs ( eigen_values ) )
            eigen_values = eigen_values [ index ]
            eigen_vectors = eigen_vectors [ : , index ]
            return eigen_vectors [ : , k_skip : k + 1 ] , np . sum ( eigen_values [ k_skip : k + 1 ] )
        except np . linalg . LinAlgError :
            # try again with bigger increase (+2 shift instead of +1)
            warnings . warn ( "LOBPCG failed the first time. Increasing Pos Def adjustment." )
            M = 2.0 * sparse . identity ( M . shape [ 0 ] ) + M
            n_components = min ( k + k_skip + 10 , M . shape [ 0 ] )
            eigen_values , eigen_vectors = eigen_decomposition ( M , n_components , eigen_solver = eigen_solver , drop_first = False , largest = False , random_state = random_state , solver_kwds = solver_kwds )
            eigen_values = eigen_values - 2
            index = np . argsort ( np . abs ( eigen_values ) )
            eigen_values = eigen_values [ index ]
            eigen_vectors = eigen_vectors [ : , index ]
            return eigen_vectors [ : , k_skip : k + 1 ] , np . sum ( eigen_values [ k_skip : k + 1 ] )
    else :
        raise ValueError ( "Unrecognized eigen_solver '%s'" % eigen_solver )
|
def txn_storeAssociation ( self , server_url , association ) :
    """Set the association for the server URL.

    Association -> NoneType
    """
    # Persist all association fields; the secret is blob-encoded for storage.
    self . db_set_assoc (
        server_url ,
        association . handle ,
        self . blobEncode ( association . secret ) ,
        association . issued ,
        association . lifetime ,
        association . assoc_type ,
    )
|
def do_p ( self , arg ) :
    """p expression

    Print the value of the expression."""
    try :
        self . message ( bdb . safe_repr ( self . _getval ( arg ) ) )
    except Exception : # NOTE(review): evaluation errors are silently swallowed
        # here - presumably _getval already reports the failure to the user
        # before re-raising (as pdb's does); confirm before changing this.
        pass
|
async def download_file ( self , file_path : base . String , destination : Optional [ base . InputFile ] = None , timeout : Optional [ base . Integer ] = sentinel , chunk_size : Optional [ base . Integer ] = 65536 , seek : Optional [ base . Boolean ] = True ) -> Union [ io . BytesIO , io . FileIO ] :
    """Download file by file_path to destination.

    If you want to automatically create destination (:class:`io.BytesIO`) use default
    value of destination and handle result of this method.

    :param file_path: file path on telegram server (You can get it from :obj:`aiogram.types.File`)
    :type file_path: :obj:`str`
    :param destination: filename or instance of :class:`io.IOBase`. For e.g. :class:`io.BytesIO`
    :param timeout: Integer
    :param chunk_size: Integer
    :param seek: Boolean - go to start of file when downloading is finished.
    :return: destination
    """
    if destination is None :
        # No destination supplied: buffer the download in memory.
        destination = io . BytesIO ( )
    url = api . Methods . file_url ( token = self . __token , path = file_path )
    # A str/path destination is opened here and returned still open.
    # NOTE(review): nothing closes that handle on error or after use -
    # presumably callers are expected to close it; confirm.
    dest = destination if isinstance ( destination , io . IOBase ) else open ( destination , 'wb' )
    async with self . session . get ( url , timeout = timeout , proxy = self . proxy , proxy_auth = self . proxy_auth ) as response :
        # Stream the body in chunk_size pieces; flush after each write.
        while True :
            chunk = await response . content . read ( chunk_size )
            if not chunk :
                break
            dest . write ( chunk )
            dest . flush ( )
    if seek :
        dest . seek ( 0 )
    return dest
|
def plot_rank ( data , var_names = None , coords = None , bins = None , ref_line = True , figsize = None , axes = None ) :
    """Plot rank order statistics of chains.

    From the paper: Rank plots are histograms of the ranked posterior
    draws (ranked over all chains) plotted separately for each chain.
    If all of the chains are targeting the same posterior, we expect
    the ranks in each chain to be uniform, whereas if one chain has a
    different location or scale parameter, this will be reflected in
    the deviation from uniformity. If rank plots of all chains look
    similar, this indicates good mixing of the chains.

    This plot was introduced by Aki Vehtari, Andrew Gelman, Daniel
    Simpson, Bob Carpenter, Paul-Christian Burkner (2019):
    Rank-normalization, folding, and localization: An improved R-hat
    for assessing convergence of MCMC.
    arXiv preprint https://arxiv.org/abs/1903.08008

    Parameters
    ----------
    data : obj
        Any object that can be converted to an az.InferenceData object
        Refer to documentation of az.convert_to_dataset for details
    var_names : string or list of variable names
        Variables to be plotted
    coords : mapping, optional
        Coordinates of var_names to be plotted. Passed to `Dataset.sel`
    bins : None or passed to np.histogram
        Binning strategy used for histogram. By default uses twice the
        result of Sturges' formula. See `np.histogram` documentation for
        other available arguments.
    ref_line : boolean
        Whether to include a dashed line showing where a uniform
        distribution would lie
    figsize : tuple
        Figure size. If None it will be defined automatically.
    axes : axes
        Matplotlib axes. Defaults to None.

    Returns
    -------
    axes : matplotlib axes

    Examples
    --------
    Show a default rank plot

    .. plot::
        :context: close-figs

        >>> import arviz as az
        >>> data = az.load_arviz_data('centered_eight')
        >>> az.plot_rank(data)

    Recreate Figure 13 from the arxiv preprint

    .. plot::
        :context: close-figs

        >>> import arviz as az
        >>> data = az.load_arviz_data('centered_eight')
        >>> az.plot_rank(data, var_names='tau')
    """
    posterior_data = convert_to_dataset ( data , group = "posterior" )
    if coords is not None :
        posterior_data = posterior_data . sel ( ** coords )
    var_names = _var_names ( var_names , posterior_data )
    plotters = list ( xarray_var_iter ( posterior_data , var_names = var_names , combined = True ) )

    if bins is None : # Use double Sturges' formula
        bins = _sturges_formula ( posterior_data , mult = 2 )

    rows , cols = default_grid ( len ( plotters ) )
    # Bug fix: compute the label/title font sizes unconditionally. They were
    # previously assigned only inside the `axes is None` branch, so passing
    # pre-made axes raised NameError at ax.set_xlabel below.
    figsize , ax_labelsize , titlesize , _ , _ , _ = _scale_fig_size ( figsize , None , rows = rows , cols = cols )
    if axes is None :
        _ , axes = _create_axes_grid ( len ( plotters ) , rows , cols , figsize = figsize , squeeze = False )

    for ax , ( var_name , selection , var_data ) in zip ( axes . ravel ( ) , plotters ) :
        # Rank over all chains jointly, keeping the (chain, draw) layout.
        ranks = scipy . stats . rankdata ( var_data ) . reshape ( var_data . shape )
        all_counts = [ ]
        for row in ranks :
            counts , bin_ary = np . histogram ( row , bins = bins , range = ( 0 , ranks . size ) )
            all_counts . append ( counts )
        all_counts = np . array ( all_counts )
        # Vertical gap between stacked per-chain histograms.
        gap = all_counts . max ( ) * 1.05
        width = bin_ary [ 1 ] - bin_ary [ 0 ]

        # Center the bins
        bin_ary = ( bin_ary [ 1 : ] + bin_ary [ : - 1 ] ) / 2

        y_ticks = [ ]
        for idx , counts in enumerate ( all_counts ) :
            y_ticks . append ( idx * gap )
            if ref_line : # Line where data is uniform
                ax . axhline ( y = y_ticks [ - 1 ] + counts . mean ( ) , linestyle = "--" , color = "C1" )
            # fake an x-axis
            ax . axhline ( y = y_ticks [ - 1 ] , color = "k" , lw = 1 )
            ax . bar ( bin_ary , counts , bottom = y_ticks [ - 1 ] , width = width , align = "center" , color = "C0" , edgecolor = ax . get_facecolor ( ) , )
        ax . set_xlabel ( "Rank (all chains)" , fontsize = ax_labelsize )
        ax . set_ylabel ( "Chain" , fontsize = ax_labelsize )
        ax . set_yticks ( y_ticks )
        ax . set_yticklabels ( np . arange ( len ( y_ticks ) ) )
        ax . set_title ( make_label ( var_name , selection ) , fontsize = titlesize )
    return axes
|
def flush ( self ) :
    """Commit backing storage to disk.

    This method is largely internal, and it is not necessary to call this
    from user code. It should not be explicitly invoked and may be removed
    in future versions.
    """
    # Dispatch on the field kind; anything else means internal corruption.
    if self . kind == TraceField :
        self . filehandle . putth ( self . traceno , self . buf )
        return
    if self . kind == BinField :
        self . filehandle . putbin ( self . buf )
        return
    msg = 'Object corrupted: kind {} not valid'
    raise RuntimeError ( msg . format ( self . kind ) )
|
def content_break ( self , el ) :
    """Break on specified boundaries."""
    # Only ODP documents define page boundaries here.
    if self . type != 'odp' :
        return False
    # A break happens on draw-namespace 'page' elements.
    return (
        el . name == 'page'
        and bool ( el . namespace )
        and el . namespace == self . namespaces [ 'draw' ]
    )
|
def parse ( some_text , ** kwargs ) :
    """Creates request to AddressParser
    and returns list of Address objects"""
    # kwargs configure the underlying AddressParser instance.
    address_parser = parser . AddressParser ( ** kwargs )
    return address_parser . parse ( some_text )
|
def get ( cls , xuid , scid , clip_id ) :
    '''Gets a specific game clip

    :param xuid: xuid of an xbox live user
    :param scid: scid of a clip
    :param clip_id: id of a clip
    '''
    params = { 'xuid' : xuid , 'scid' : scid , 'clip_id' : clip_id , }
    url = (
        'https://gameclipsmetadata.xboxlive.com/users'
        '/xuid(%(xuid)s)/scids/%(scid)s/clips/%(clip_id)s' % params
    )
    resp = xbox . client . _get ( url )
    # scid does not seem to matter when fetching clips, as long as it looks
    # like a uuid it should be fine. perhaps we'll raise an exception in future
    if resp . status_code == 404 :
        raise ClipNotFound (
            'Could not find clip: xuid=%s, scid=%s, clip_id=%s' % ( xuid , scid , clip_id , )
        )
    data = resp . json ( )
    # As we don't have the user object, hand back a lazily evaluated proxy
    # that fetches it only when required.
    user = UserProxy ( xuid )
    return cls ( user , data [ 'gameClip' ] )
|
def getPID ( self ) :
    """Returns the PID for the associated app
    (or -1, if no app is associated or the app is not running)"""
    if self . _pid is None :
        return - 1
    # A stale PID is replaced by -1 and remembered as such.
    if not PlatformManager . isPIDValid ( self . _pid ) :
        self . _pid = - 1
    return self . _pid
|
def update ( self , * , name = None , position = None , visibility = None ) :
    """Changes the name, position or visibility of this worksheet.

    :param name: new worksheet name
    :param position: new position of the worksheet
    :param visibility: new visibility state
    :return: True on success, False if the server request failed
    :raises ValueError: if no parameter is provided
    """
    if name is None and position is None and visibility is None :
        raise ValueError ( 'Provide at least one parameter to update' )

    data = { }
    # Compare against None (not truthiness) so falsy-but-valid values such
    # as position=0 or an empty name are still sent (previously they were
    # silently dropped even though they passed the validation above).
    if name is not None :
        data [ 'name' ] = name
    if position is not None :
        data [ 'position' ] = position
    if visibility is not None :
        data [ 'visibility' ] = visibility

    response = self . session . patch ( self . build_url ( '' ) , data = data )
    if not response :
        return False

    data = response . json ( )
    self . name = data . get ( 'name' , self . name )
    self . position = data . get ( 'position' , self . position )
    self . visibility = data . get ( 'visibility' , self . visibility )
    return True
|
def _get_rs_id ( variant , rs_map , variant_type ) :
"""Given a variant dict , return unambiguous RS ID
TODO
Some sequence alterations appear to have mappings to dbsnp ' s notation
for example ,
reference allele : TTTTT
variant allele : TTTTT
Is theoretically the same as - / T , we should clarify with UDP and then add
functionality to map this notation to the more common - / T
: param variant :
: param rs _ map :
: param type : snp or indel
: return :"""
|
rs_id = None
if variant_type == 'snp' :
variant_key = "{0}-{1}" . format ( variant [ 'chromosome' ] , variant [ 'position' ] )
if variant_key in rs_map :
snp_candidates = [ rs_dict for rs_dict in rs_map [ variant_key ] if rs_dict [ 'type' ] == 'snp' ]
if len ( snp_candidates ) == 1 :
rs_id = snp_candidates [ 0 ] [ "rs_id" ]
elif variant_type == 'indel' :
rs_candidates = [ ]
variant_key = "{0}-{1}" . format ( variant [ 'chromosome' ] , variant [ 'position' ] )
if variant_key in rs_map :
snp_candidates = [ rs_dict for rs_dict in rs_map [ variant_key ] if rs_dict [ 'type' ] == 'in-del' ]
for candidate in snp_candidates :
alleles = candidate [ 'alleles' ] . split ( '/' )
if variant [ 'reference_allele' ] in alleles and variant [ 'variant_allele' ] in alleles :
rs_candidates . append ( candidate [ 'rs_id' ] )
if len ( rs_candidates ) == 1 :
rs_id = rs_candidates [ 0 ]
elif len ( rs_candidates ) > 1 :
LOG . info ( "ambiguous rs mapping for: %s\ncandidate ids: %s" , variant , rs_candidates )
else :
LOG . info ( "rs at coordinate but no match found" " for variant %s\n candidate ids: %s" , variant , rs_map [ variant_key ] )
else :
LOG . warning ( "type: %s unsupported" , variant_type )
return rs_id
|
def set_key ( key , value , host = None , port = None , db = None , password = None ) :
    '''Set redis key value

    CLI Example:

    .. code-block:: bash

        salt '*' redis.set_key foo bar
    '''
    connection = _connect ( host , port , db , password )
    return connection . set ( key , value )
|
def eagle ( args ) :
    """%prog eagle fastafile

    Simulate paired-end reads from `fastafile` with the EAGLE simulator,
    generating run-info, quality-table and template-length support files
    for the requested read length, then converting the resulting BAM to
    FASTQ.
    """
    p = OptionParser ( eagle . __doc__ )
    p . add_option ( "--share" , default = "/usr/local/share/EAGLE/" , help = "Default EAGLE share path" )
    add_sim_options ( p )
    opts , args = p . parse_args ( args )
    if len ( args ) != 1 :
        sys . exit ( not p . print_help ( ) )

    fastafile , = args
    share = opts . share
    depth = opts . depth
    readlen = opts . readlen
    distance = opts . distance
    pf = op . basename ( fastafile ) . split ( "." ) [ 0 ]

    # Since EAGLE does not natively support read length other than 100bp and
    # 250bp - for an arbitrary read length we need to generate a bunch of
    # support files

    # First file is the Runinfo
    runinfo_readlen = "RunInfo_PairedReads2x{}Cycles1x1Tiles.xml" . format ( readlen )
    if not op . exists ( runinfo_readlen ) :
        runinfo = op . join ( share , "RunInfo/RunInfo_PairedReads2x251Cycles1x1Tiles.xml" )
        # NOTE(review): this template handle is never explicitly closed -
        # presumably acceptable for a short-lived CLI command.
        runinfo_xml = open ( runinfo ) . read ( )
        # Rewrite the 2x251 template for the requested read length.
        runinfo_xml = runinfo_xml . replace ( "251" , str ( readlen ) ) . replace ( "252" , str ( readlen + 1 ) ) . replace ( "502" , str ( 2 * readlen ) )
        fw = open ( runinfo_readlen , "w" )
        print ( runinfo_xml . strip ( ) , file = fw )
        fw . close ( )

    # Generate quality profiles
    quality_file1 = "QualityTable.read1.length{}.qval" . format ( readlen )
    quality_file2 = "QualityTable.read2.length{}.qval" . format ( readlen )
    if not ( op . exists ( quality_file1 ) and op . exists ( quality_file2 ) ) :
        # Rescale the default 101-cycle quality tables to the new cycle count.
        for i , qq in enumerate ( [ quality_file1 , quality_file2 ] ) :
            cmd = "/usr/local/libexec/EAGLE/scaleQualityTable.pl"
            cmd += " --input {}" . format ( op . join ( share , "QualityTables/DefaultQualityTable.read{}.length101.qval" . format ( i + 1 ) ) )
            cmd += " --cycles {}" . format ( readlen )
            cmd += " --output {}" . format ( qq )
            sh ( cmd , silent = True )

    # Since distance is different from the default distribution which is
    # centered around 319, we shift our peak to the new peak
    template_lengths = op . join ( share , "TemplateLengthTables/DefaultTemplateLengthTable.tsv" )
    template_distance = "TemplateLengthTable{}.tsv" . format ( distance )
    shift = distance - 319
    if not op . exists ( template_distance ) :
        fp = open ( template_lengths )
        fw = open ( template_distance , "w" )
        for row in fp :
            size , counts = row . split ( )
            size = int ( size )
            counts = int ( counts )
            size += shift
            # Fragments shorter than the read length are not usable; drop them.
            if size < readlen :
                continue
            print ( "\t" . join ( str ( x ) for x in ( size , counts ) ) , file = fw )
        fw . close ( )

    # All done, let's simulate!
    cmd = "configureEAGLE.pl"
    cmd += " --reference-genome {}" . format ( fastafile )
    cmd += " --coverage-depth {}" . format ( depth )
    cmd += " --gc-coverage-fit-table {}" . format ( op . join ( share , "GcCoverageFitTables/Homo_sapiens.example1.tsv" ) )
    cmd += " --run-info {}" . format ( runinfo_readlen )
    cmd += " --quality-table {}" . format ( quality_file1 )
    cmd += " --quality-table {}" . format ( quality_file2 )
    cmd += " --template-length-table {}" . format ( template_distance )
    cmd += " --random-seed {}" . format ( random . randint ( 1 , 65535 ) )
    sh ( cmd , silent = True )

    # Retrieve results
    outpf = opts . outfile or "{0}.{1}bp.{2}x" . format ( pf , distance , depth )
    outpf += ".bwa"
    cwd = os . getcwd ( )
    eagle_dir = "EAGLE"
    os . chdir ( eagle_dir )
    sh ( "make bam" , silent = True )

    # Convert BAM to FASTQ
    from jcvi . formats . sam import fastq
    a , b = fastq ( [ "eagle.bam" , outpf ] )
    sh ( "mv {} {} ../" . format ( a , b ) )
    os . chdir ( cwd )

    # Clean-up
    shutil . rmtree ( eagle_dir )
|
def read_config ( config_path = CONFIG_PATH ) :
    """Read the config information from the config file.

    Args:
        config_path (str): Relative path to the email config file.

    Returns:
        defaultdict: A defaultdict with the config information.

    Raises:
        IOError
    """
    if not os . path . isfile ( config_path ) :
        raise IOError ( "No config file found at %s" % config_path )
    # Parse the INI file, then flatten it into a defaultdict.
    parser_obj = configparser . ConfigParser ( )
    parser_obj . read ( config_path )
    return _config_parser_to_defaultdict ( parser_obj )
|
def search_pages ( self , page_ids = None , begin = 0 , count = 10 ) :
    """Query the shake-around page list.

    Details:
    http://mp.weixin.qq.com/wiki/5/6626199ea8757c752046d8e46cf13251.html

    :param page_ids: ID(s) of specific page(s) to query; a scalar is
        wrapped into a single-item list
    :param begin: start index into the page list (used when no IDs given)
    :param count: number of pages to query (used when no IDs given)
    :return: page query result data
    """
    if page_ids :
        # type 1: query by explicit page id list.
        if not isinstance ( page_ids , ( tuple , list ) ) :
            page_ids = [ page_ids ]
        data = { 'type' : 1 , 'page_ids' : page_ids }
    else :
        # type 2: paginated query over all pages.
        data = { 'type' : 2 , 'begin' : begin , 'count' : count }
    return self . _post ( 'shakearound/page/search' , data = data , result_processor = lambda x : x [ 'data' ] )
|
def fpy_interface ( fpy , static , interface , typedict ) :
    """Splices the full list of subroutines and the module procedure list
    into the static.f90 file.

    :arg static: the string contents of the static.f90 file.
    :arg interface: the name of the interface *field* being replaced.
    :arg typedict: the dictionary of dtypes and their kind and suffix combos.
    """
    modprocs = [ ]
    subtext = [ ]
    for dtype , combos in list ( typedict . items ( ) ) :
        for kind , suffix in combos :
            xnames , sub = fpy_interface_sub ( fpy , dtype , kind , suffix )
            modprocs . extend ( xnames )
            subtext . append ( sub )
            subtext . append ( "\n" )
    # Chunk the names of the module procedures into blocks of five
    # so that they display nicely for human readability.
    from fortpy . printing . formatting import present_params
    spliced = static . replace ( interface , present_params ( modprocs , 21 ) )
    return spliced . replace ( interface . replace ( "py" , "xpy" ) , '' . join ( subtext ) )
|
def modifyInPlace ( self , * , sort = None , purge = False , done = None ) :
    """Like Model.modify, but changes existing database instead of
    returning a new one.

    :param sort: forwarded to Model.modify
    :param purge: forwarded to Model.modify
    :param done: forwarded to Model.modify
    """
    # Replace our own data with the modified copy.
    self . data = self . modify ( sort = sort , purge = purge , done = done )
|
def _decimal_to_128 ( value ) :
    """Converts a decimal.Decimal to BID (high bits, low bits).

    :Parameters:
      - `value`: An instance of decimal.Decimal

    :return: ``(high, low)`` pair of 64-bit integers forming the
        decimal128 Binary Integer Decimal encoding.
    :raises ValueError: for NaN values carrying a payload.
    """
    with decimal . localcontext ( _DEC128_CTX ) as ctx :
        value = ctx . create_decimal ( value )

    if value . is_infinite ( ) :
        return _NINF if value . is_signed ( ) else _PINF

    sign , digits , exponent = value . as_tuple ( )

    if value . is_nan ( ) :
        if digits :
            raise ValueError ( "NaN with debug payload is not supported" )
        if value . is_snan ( ) :
            return _NSNAN if value . is_signed ( ) else _PSNAN
        return _NNAN if value . is_signed ( ) else _PNAN

    significand = int ( "" . join ( [ str ( digit ) for digit in digits ] ) )
    # Split the significand across the two 64-bit halves directly with
    # mask/shift instead of testing every bit in a Python loop (equivalent
    # result, O(1) instead of O(bits)).
    low = significand & 0xffffffffffffffff
    high = significand >> 64

    biased_exponent = exponent + _EXPONENT_BIAS

    if high >> 49 == 1 :
        # Significand needs the extended combination-field form.
        high = high & 0x7fffffffffff
        high |= _EXPONENT_MASK
        high |= ( biased_exponent & 0x3fff ) << 47
    else :
        high |= biased_exponent << 49

    if sign :
        high |= _SIGN
    return high , low
|
def switch_to_table ( self , event ) :
    """Switches grid to table.

    Parameters
    ----------
    event.newtable: Integer
    \tTable that the grid is switched to
    """
    newtable = event . newtable
    no_tabs = self . grid . code_array . shape [ 2 ] - 1
    # Silently ignore out-of-range table indices.
    if 0 <= newtable <= no_tabs :
        self . grid . current_table = newtable
        self . grid . SetToolTip ( None )
        # Delete renderer cache
        self . grid . grid_renderer . cell_cache . clear ( )
        # Delete video cells: stop and release each player before destroying
        # its panel so no playback survives the table switch.
        video_cells = self . grid . grid_renderer . video_cells
        for key in video_cells :
            video_panel = video_cells [ key ]
            video_panel . player . stop ( )
            video_panel . player . release ( )
            video_panel . Destroy ( )
        video_cells . clear ( )
        # Hide cell editor
        cell_editor = self . grid . GetCellEditor ( self . grid . GetGridCursorRow ( ) , self . grid . GetGridCursorCol ( ) )
        try :
            cell_editor . Reset ( )
        except AttributeError : # No cell editor open
            pass
        self . grid . HideCellEditControl ( )
        # Change value of entry_line and table choice
        post_command_event ( self . main_window , self . TableChangedMsg , table = newtable )
        # Reset row heights and column widths by zooming
        self . zoom ( )
|
def draw_scatter_plot(world, size, target):
    """Draw a temperature/humidity scatter plot of all land cells.

    This function can be used on a generic canvas (either an image to save
    on disk or a canvas part of a GUI).

    Args:
        world: world object providing layers (humidity, temperature, ocean),
            thresholds/quantiles, and the ``is_land`` / ``is_temperature_*``
            / ``is_humidity_*`` classification predicates.
        size: width and height of the (square) plot in pixels.
        target: canvas exposing ``set_pixel(x, y, (r, g, b, a))``.
    """

    # Find min and max values of humidity and temperature on land so we can
    # normalize temperature and humidity to the chart
    humid = numpy.ma.masked_array(world.layers['humidity'].data, mask=world.layers['ocean'].data)
    temp = numpy.ma.masked_array(world.layers['temperature'].data, mask=world.layers['ocean'].data)
    min_humidity = humid.min()
    max_humidity = humid.max()
    min_temperature = temp.min()
    max_temperature = temp.max()
    temperature_delta = max_temperature - min_temperature
    humidity_delta = max_humidity - min_humidity

    # set all pixels white
    for y in range(0, size):
        for x in range(0, size):
            target.set_pixel(x, y, (255, 255, 255, 255))

    # fill in 'bad' boxes with grey
    h_values = ['62', '50', '37', '25', '12']
    t_values = [0, 1, 2, 3, 5]
    for loop in range(0, 5):
        h_min = (size - 1) * ((world.layers['humidity'].quantiles[h_values[loop]] - min_humidity) / humidity_delta)
        if loop != 4:
            h_max = (size - 1) * ((world.layers['humidity'].quantiles[h_values[loop + 1]] - min_humidity) / humidity_delta)
        else:
            h_max = size
        v_max = (size - 1) * ((world.layers['temperature'].thresholds[t_values[loop]][1] - min_temperature) / temperature_delta)
        # clamp box boundaries to the canvas
        if h_min < 0:
            h_min = 0
        if h_max > size:
            h_max = size
        if v_max < 0:
            v_max = 0
        if v_max > (size - 1):
            v_max = size - 1
        if h_max > 0 and h_min < size and v_max > 0:
            for y in range(int(h_min), int(h_max)):
                for x in range(0, int(v_max)):
                    target.set_pixel(x, (size - 1) - y, (128, 128, 128, 255))

    # draw vertical lines at the temperature thresholds
    for t in range(0, 6):
        v = (size - 1) * ((world.layers['temperature'].thresholds[t][1] - min_temperature) / temperature_delta)
        if 0 < v < size:
            for y in range(0, size):
                target.set_pixel(int(v), (size - 1) - y, (0, 0, 0, 255))
    # draw horizontal lines at the humidity quantiles
    ranges = ['87', '75', '62', '50', '37', '25', '12']
    for p in ranges:
        h = (size - 1) * ((world.layers['humidity'].quantiles[p] - min_humidity) / humidity_delta)
        if 0 < h < size:
            for x in range(0, size):
                target.set_pixel(x, (size - 1) - int(h), (0, 0, 0, 255))

    # draw gamma curve
    curve_gamma = world.gamma_curve
    curve_bonus = world.curve_offset
    for x in range(0, size):
        y = (size - 1) * ((numpy.power((float(x) / (size - 1)), curve_gamma) * (1 - curve_bonus)) + curve_bonus)
        target.set_pixel(x, (size - 1) - int(y), (255, 0, 0, 255))

    # examine all cells in the map and if it is land get the temperature and
    # humidity for the cell.
    for y in range(world.height):
        for x in range(world.width):
            if world.is_land((x, y)):
                t = world.temperature_at((x, y))
                p = world.humidity_at((x, y))
                # get red and blue values depending on temperature and
                # humidity; initialize defaults first so a cell matching
                # none of the classes below cannot leave r/b unbound
                # (previously this raised NameError)
                r = 0
                b = 0
                if world.is_temperature_polar((x, y)):
                    r = 0
                elif world.is_temperature_alpine((x, y)):
                    r = 42
                elif world.is_temperature_boreal((x, y)):
                    r = 85
                elif world.is_temperature_cool((x, y)):
                    r = 128
                elif world.is_temperature_warm((x, y)):
                    r = 170
                elif world.is_temperature_subtropical((x, y)):
                    r = 213
                elif world.is_temperature_tropical((x, y)):
                    r = 255
                if world.is_humidity_superarid((x, y)):
                    b = 32
                elif world.is_humidity_perarid((x, y)):
                    b = 64
                elif world.is_humidity_arid((x, y)):
                    b = 96
                elif world.is_humidity_semiarid((x, y)):
                    b = 128
                elif world.is_humidity_subhumid((x, y)):
                    b = 160
                elif world.is_humidity_humid((x, y)):
                    b = 192
                elif world.is_humidity_perhumid((x, y)):
                    b = 224
                elif world.is_humidity_superhumid((x, y)):
                    b = 255
                # calculate x and y position based on normalized temperature
                # and humidity
                nx = (size - 1) * ((t - min_temperature) / temperature_delta)
                ny = (size - 1) * ((p - min_humidity) / humidity_delta)
                target.set_pixel(int(nx), (size - 1) - int(ny), (r, 128, b, 255))
|
def string(self, *args):
    """Return the UTF-8 string stored in the node, or None if no bytes."""

    raw = self.bytes(*args)
    if raw is None:
        return None
    return raw.rstrip(b"\x00").decode('utf-8')
|
def wrap_text(paragraph, line_count, min_char_per_line=0):
    """Wrap *paragraph* into at most *line_count* lines.

    Args:
        paragraph: text to wrap; whitespace is normalized first.
        line_count: desired number of output lines.
        min_char_per_line: if non-zero, wrap at this width and truncate the
            result to ``line_count`` lines; otherwise derive the width from
            the text length so the output has roughly ``line_count`` lines.

    Returns:
        list of str: the wrapped lines.
    """

    one_string = strip_all_white_space(paragraph)
    if min_char_per_line:
        # Slicing never raises IndexError, so the previous try/except
        # around this was dead code.
        return wrap(one_string, width=min_char_per_line)[:line_count]
    # textwrap requires a positive integer width; the old code passed the
    # float ``len(...) / line_count`` under Python 3.  Clamp to >= 1 so an
    # empty/short string cannot produce width 0 (which raises ValueError).
    return wrap(one_string, max(1, len(one_string) // line_count))
|
def read_tb(path):
    """Read tensorboard event files into a pandas DataFrame.

    Args:
        path: a tensorboard file OR a directory, where we will find all TB
            files of the form events.*

    Returns:
        pandas.DataFrame: one column per tag (sorted by name) and one row
        per step from 1 to the maximum step seen; entries with no recorded
        value are NaN.

    Raises:
        NotImplementedError: if *path* is neither a directory nor a file
            named ``events.*``.
    """

    import pandas
    import numpy as np
    from glob import glob
    from collections import defaultdict
    import tensorflow as tf
    if osp.isdir(path):
        fnames = glob(osp.join(path, "events.*"))
    elif osp.basename(path).startswith("events."):
        fnames = [path]
    else:
        raise NotImplementedError("Expected tensorboard file or directory containing them. Got %s" % path)
    tag2pairs = defaultdict(list)
    maxstep = 0
    for fname in fnames:
        for summary in tf.train.summary_iterator(fname):
            # summaries at step 0 (e.g. graph/meta events) are skipped
            if summary.step > 0:
                for v in summary.summary.value:
                    pair = (summary.step, v.simple_value)
                    tag2pairs[v.tag].append(pair)
                maxstep = max(summary.step, maxstep)
    # dense (maxstep, n_tags) matrix, NaN where a tag has no value at a step
    data = np.empty((maxstep, len(tag2pairs)))
    data[:] = np.nan
    tags = sorted(tag2pairs.keys())
    for (colidx, tag) in enumerate(tags):
        pairs = tag2pairs[tag]
        for (step, value) in pairs:
            # steps are 1-based in the event file; row 0 holds step 1.
            # NOTE(review): if the same tag appears twice at a step, the
            # later value silently wins — presumably intended.
            data[step - 1, colidx] = value
    return pandas.DataFrame(data, columns=tags)
|
def _parse_doc ( doc = '' ) :
"""Parse a docstring into title and description .
Args
doc : str
A docstring , optionally with a title line , separated from a description
line by at least one blank line .
Returns
title : str
The first line of the docstring .
description : str
The rest of a docstring ."""
|
title , description = '' , ''
if doc :
sp = doc . split ( '\n' , 1 )
title = sp [ 0 ] . strip ( )
if len ( sp ) > 1 :
description = textwrap . dedent ( sp [ 1 ] ) . strip ( )
return ( title , description )
|
def authenticate(self, auth_url=None, **kwargs):
    """Authenticates a user via the Keystone Identity API.

    Args:
        auth_url: Keystone endpoint URL; falls back to
            ``settings.OPENSTACK_KEYSTONE_URL`` when not given.
        **kwargs: forwarded to the auth backend; ``request`` (the Django
            request, optional) and ``user_domain_name`` are also read here.

    Returns:
        The user object created from the scoped token.

    Raises:
        exceptions.KeystoneAuthException: if the user is not authorized
            for any project (or, with Keystone v3, any project or domain).
    """

    LOG.debug('Beginning user authentication')
    if not auth_url:
        auth_url = settings.OPENSTACK_KEYSTONE_URL
    auth_url, url_fixed = utils.fix_auth_url_version_prefix(auth_url)
    if url_fixed:
        LOG.warning("The OPENSTACK_KEYSTONE_URL setting points to a v2.0 " "Keystone endpoint, but v3 is specified as the API " "version to use by Horizon. Using v3 endpoint for " "authentication.")
    plugin, unscoped_auth = self._get_auth_backend(auth_url, **kwargs)
    # the recent project id a user might have set in a cookie
    recent_project = None
    request = kwargs.get('request')
    if request:  # Grab recent_project found in the cookie, try to scope
        # to the last project used.
        recent_project = request.COOKIES.get('recent_project')
    unscoped_auth_ref = plugin.get_access_info(unscoped_auth)
    # Check expiry for our unscoped auth ref.
    self.check_auth_expiry(unscoped_auth_ref)
    domain_name = kwargs.get('user_domain_name', None)
    domain_auth, domain_auth_ref = plugin.get_domain_scoped_auth(unscoped_auth, unscoped_auth_ref, domain_name)
    scoped_auth, scoped_auth_ref = plugin.get_project_scoped_auth(unscoped_auth, unscoped_auth_ref, recent_project=recent_project)
    # Abort if there are no projects for this user and a valid domain
    # token has not been obtained
    # The valid use cases for a user login are:
    #    Keystone v2: user must have a role on a project and be able
    #                 to obtain a project scoped token
    #    Keystone v3: 1) user can obtain a domain scoped token (user
    #                    has a role on the domain they authenticated to),
    #                    only, no roles on a project
    #                 2) user can obtain a domain scoped token and has
    #                    a role on a project in the domain they
    #                    authenticated to (and can obtain a project scoped
    #                    token)
    #                 3) user cannot obtain a domain scoped token, but can
    #                    obtain a project scoped token
    if not scoped_auth_ref and domain_auth_ref:  # if the user can't obtain a project scoped token, set the scoped
        # token to be the domain token, if valid
        scoped_auth = domain_auth
        scoped_auth_ref = domain_auth_ref
    elif not scoped_auth_ref and not domain_auth_ref:
        msg = _('You are not authorized for any projects.')
        if utils.get_keystone_version() >= 3:
            msg = _('You are not authorized for any projects or domains.')
        raise exceptions.KeystoneAuthException(msg)
    # Check expiry for our new scoped token.
    self.check_auth_expiry(scoped_auth_ref)
    # We want to try to use the same region we just logged into
    # which may or may not be the default depending upon the order
    # keystone uses
    region_name = None
    id_endpoints = scoped_auth_ref.service_catalog.get_endpoints(service_type='identity')
    for id_endpoint in [cat for cat in id_endpoints['identity']]:
        if auth_url in id_endpoint.values():
            region_name = id_endpoint['region']
            break
    interface = getattr(settings, 'OPENSTACK_ENDPOINT_TYPE', 'public')
    endpoint, url_fixed = utils.fix_auth_url_version_prefix(scoped_auth_ref.service_catalog.url_for(service_type='identity', interface=interface, region_name=region_name))
    if url_fixed:
        LOG.warning("The Keystone URL in service catalog points to a v2.0 " "Keystone endpoint, but v3 is specified as the API " "version to use by Horizon. Using v3 endpoint for " "authentication.")
    # If we made it here we succeeded. Create our User!
    unscoped_token = unscoped_auth_ref.auth_token
    user = auth_user.create_user_from_token(request, auth_user.Token(scoped_auth_ref, unscoped_token=unscoped_token), endpoint, services_region=region_name)
    if request is not None:  # if no k2k providers exist then the function returns quickly
        utils.store_initial_k2k_session(auth_url, request, scoped_auth_ref, unscoped_auth_ref)
        request.session['unscoped_token'] = unscoped_token
        if domain_auth_ref:  # check django session engine, if using cookies, this will not
            # work, as it will overflow the cookie so don't add domain
            # scoped token to the session and put error in the log
            if utils.using_cookie_backed_sessions():
                LOG.error('Using signed cookies as SESSION_ENGINE with ' 'OPENSTACK_KEYSTONE_MULTIDOMAIN_SUPPORT is ' 'enabled. This disables the ability to ' 'perform identity operations due to cookie size ' 'constraints.')
            else:
                request.session['domain_token'] = domain_auth_ref
        request.user = user
        # session lifetime is the shorter of SESSION_TIMEOUT and the
        # remaining token lifetime
        timeout = getattr(settings, "SESSION_TIMEOUT", 3600)
        token_life = user.token.expires - datetime.datetime.now(pytz.utc)
        session_time = min(timeout, int(token_life.total_seconds()))
        request.session.set_expiry(session_time)
        keystone_client_class = utils.get_keystone_client().Client
        session = utils.get_session()
        scoped_client = keystone_client_class(session=session, auth=scoped_auth)
        # Support client caching to save on auth calls.
        setattr(request, KEYSTONE_CLIENT_ATTR, scoped_client)
    LOG.debug('Authentication completed.')
    return user
|
def _pusher_connect_handler ( self , data ) :
"""Event handler for the connection _ established event . Binds the
shortlink _ scanned event"""
|
self . channel = self . pusher . subscribe ( self . pos_callback_chan )
for listener in self . pusher_connected_listeners :
listener ( data )
|
def _validate_json_for_global_workflow(json_spec, args):
    """Validates fields used for building a global workflow.

    Since building a global workflow is done after all the underlying workflows
    are built, which may be time-consuming, we validate as much as possible here.

    Args:
        json_spec (dict): parsed dxworkflow.json; may be mutated (``billTo``
            is set from ``args.bill_to`` when provided).
        args: parsed command-line arguments; only ``bill_to`` is read.

    Raises:
        WorkflowBuilderException: if a required field is missing or malformed.
    """

    # TODO: verify the billTo can build the workflow
    # TODO: if the global workflow build fails add an option to interactively change billto
    # TODO: (or other simple fields) instead of failing altogether
    # TODO: get a confirmation before building a workflow that may be costly
    if 'name' not in json_spec:
        raise WorkflowBuilderException("dxworkflow.json contains no 'name' field, but it is required to build a global workflow")
    if not dxpy.executable_builder.GLOBAL_EXEC_NAME_RE.match(json_spec['name']):
        raise WorkflowBuilderException("The name of your workflow must match /^[a-zA-Z0-9._-]+$/")
    if json_spec['name'] != json_spec['name'].lower():
        # Logger.warn is a deprecated alias; use warning()
        logger.warning('workflow name "{}" should be all lowercase'.format(json_spec['name']))
    if 'version' not in json_spec:
        raise WorkflowBuilderException("dxworkflow.json contains no 'version' field, but it is required to build a global workflow")
    if not dxpy.executable_builder.GLOBAL_EXEC_VERSION_RE.match(json_spec['version']):
        # non-semver versions are tolerated with a warning only
        logger.warning('"version" {} should be semver compliant (e.g. of the form X.Y.Z)'.format(json_spec['version']))
    if 'details' in json_spec:
        if not isinstance(json_spec['details'], dict):
            raise WorkflowBuilderException('The field "details" must be a dictionary')
    if 'regionalOptions' in json_spec:
        if not (isinstance(json_spec['regionalOptions'], dict) and json_spec['regionalOptions'] and all([isinstance(i, dict) for i in json_spec['regionalOptions'].values()])):
            raise WorkflowBuilderException('The field "regionalOptions" must be a non-empty dictionary whose values are dictionaries')
    if args.bill_to:
        json_spec["billTo"] = args.bill_to
|
def plot_roc_curve(y_true, y_probas, title='ROC Curves', curves=('micro', 'macro', 'each_class'), ax=None, figsize=None, cmap='nipy_spectral', title_fontsize="large", text_fontsize="medium"):
    """Generates the ROC curves from labels and predicted scores/probabilities.

    Args:
        y_true (array-like, shape (n_samples)):
            Ground truth (correct) target values.
        y_probas (array-like, shape (n_samples, n_classes)):
            Prediction probabilities for each class returned by a classifier.
        title (string, optional): Title of the generated plot. Defaults to
            "ROC Curves".
        curves (array-like): A listing of which curves should be plotted on
            the resulting plot. Defaults to `("micro", "macro", "each_class")`
            i.e. "micro" for micro-averaged curve, "macro" for macro-averaged
            curve.
        ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
            plot the curve. If None, the plot is drawn on a new set of axes.
        figsize (2-tuple, optional): Tuple denoting figure size of the plot
            e.g. (6, 6). Defaults to ``None``.
        cmap (string or :class:`matplotlib.colors.Colormap` instance,
            optional): Colormap used for plotting the projection. View
            Matplotlib Colormap documentation for available options.
            https://matplotlib.org/users/colormaps.html
        title_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults
            to "large".
        text_fontsize (string or int, optional): Matplotlib-style fontsizes.
            Use e.g. "small", "medium", "large" or integer-values. Defaults
            to "medium".

    Returns:
        ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
            drawn.

    Example:
        >>> import scikitplot.plotters as skplt
        >>> nb = GaussianNB()
        >>> nb = nb.fit(X_train, y_train)
        >>> y_probas = nb.predict_proba(X_test)
        >>> skplt.plot_roc_curve(y_test, y_probas)
        <matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
        >>> plt.show()
    """

    y_true = np.array(y_true)
    y_probas = np.array(y_probas)
    if 'micro' not in curves and 'macro' not in curves and 'each_class' not in curves:
        raise ValueError('Invalid argument for curves as it ' 'only takes "micro", "macro", or "each_class"')
    classes = np.unique(y_true)
    probas = y_probas
    # per-class curves are stored under integer keys 0..n_classes-1
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(len(classes)):
        fpr[i], tpr[i], _ = roc_curve(y_true, probas[:, i], pos_label=classes[i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # Compute micro-average ROC curve and ROC area
    # (the while loop picks a dict key that cannot collide with existing keys)
    micro_key = 'micro'
    i = 0
    while micro_key in fpr:
        i += 1
        micro_key += str(i)
    y_true = label_binarize(y_true, classes=classes)
    if len(classes) == 2:
        # label_binarize returns a single column for binary problems;
        # expand to two columns so ravel() lines up with probas
        y_true = np.hstack((1 - y_true, y_true))
    fpr[micro_key], tpr[micro_key], _ = roc_curve(y_true.ravel(), probas.ravel())
    roc_auc[micro_key] = auc(fpr[micro_key], tpr[micro_key])
    # Compute macro-average ROC curve and ROC area
    # First aggregate all false positive rates
    all_fpr = np.unique(np.concatenate([fpr[x] for x in range(len(classes))]))
    # Then interpolate all ROC curves at this points
    mean_tpr = np.zeros_like(all_fpr)
    for i in range(len(classes)):
        mean_tpr += interp(all_fpr, fpr[i], tpr[i])
    # Finally average it and compute AUC
    mean_tpr /= len(classes)
    macro_key = 'macro'
    i = 0
    while macro_key in fpr:
        i += 1
        macro_key += str(i)
    fpr[macro_key] = all_fpr
    tpr[macro_key] = mean_tpr
    roc_auc[macro_key] = auc(fpr[macro_key], tpr[macro_key])
    if ax is None:
        fig, ax = plt.subplots(1, 1, figsize=figsize)
    ax.set_title(title, fontsize=title_fontsize)
    if 'each_class' in curves:
        for i in range(len(classes)):
            # spread class colors evenly across the colormap
            color = plt.cm.get_cmap(cmap)(float(i) / len(classes))
            ax.plot(fpr[i], tpr[i], lw=2, color=color, label='ROC curve of class {0} (area = {1:0.2f})' ''.format(classes[i], roc_auc[i]))
    if 'micro' in curves:
        ax.plot(fpr[micro_key], tpr[micro_key], label='micro-average ROC curve ' '(area = {0:0.2f})'.format(roc_auc[micro_key]), color='deeppink', linestyle=':', linewidth=4)
    if 'macro' in curves:
        ax.plot(fpr[macro_key], tpr[macro_key], label='macro-average ROC curve ' '(area = {0:0.2f})'.format(roc_auc[macro_key]), color='navy', linestyle=':', linewidth=4)
    # chance diagonal
    ax.plot([0, 1], [0, 1], 'k--', lw=2)
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('False Positive Rate', fontsize=text_fontsize)
    ax.set_ylabel('True Positive Rate', fontsize=text_fontsize)
    ax.tick_params(labelsize=text_fontsize)
    ax.legend(loc='lower right', fontsize=text_fontsize)
    return ax
|
def stdout_redirector():
    """Yield a Stream that temporarily replaces ``sys.stdout``.

    Taken from here: https://eli.thegreenplace.net/2015/redirecting-all-kinds-of-stdout-in-python/
    """

    original = sys.stdout
    sys.stdout = Stream()
    try:
        yield sys.stdout
    finally:
        # close whatever is currently installed, then restore the original
        sys.stdout.close()
        sys.stdout = original
|
def _get_implied_apps ( self , detected_apps ) :
"""Get the set of apps implied by ` detected _ apps ` ."""
|
def __get_implied_apps ( apps ) :
_implied_apps = set ( )
for app in apps :
try :
_implied_apps . update ( set ( self . apps [ app ] [ 'implies' ] ) )
except KeyError :
pass
return _implied_apps
implied_apps = __get_implied_apps ( detected_apps )
all_implied_apps = set ( )
# Descend recursively until we ' ve found all implied apps
while not all_implied_apps . issuperset ( implied_apps ) :
all_implied_apps . update ( implied_apps )
implied_apps = __get_implied_apps ( all_implied_apps )
return all_implied_apps
|
def set_of_vars(arg_plot):
    """Build set of needed variables.

    Args:
        arg_plot (str): string with variable names separated with ``,``.
    Returns:
        set of str: set of variables (only names known to phyvars.PLATES).
    """

    requested = arg_plot.split(',')
    return {name for name in requested if name in phyvars.PLATES}
|
def main() -> None:
    """The main CLI interface entry point."""

    arg_parser = argparse.ArgumentParser(description="Security analysis of Ethereum smart contracts")
    create_parser(arg_parser)
    # Get config values from the command line and dispatch
    parsed = arg_parser.parse_args()
    parse_args(parser=arg_parser, args=parsed)
|
def get_point_index(point, all_points, eps=1e-4):
    """Return the index of *point* in *all_points*, or -1 if not found.

    A point matches when its Euclidean distance to a row of *all_points*
    is below *eps*; the first matching row index is returned.
    """

    distances = np.linalg.norm(point - all_points, axis=1)
    matches = np.where(distances < eps)[0]
    if matches.shape[0] == 0:
        return -1
    return matches[0]
|
def _m_to_e(cls, e, M):
    """Conversion from Mean Anomaly to Eccentric anomaly.

    Procedures for solving Kepler's Equation, A. W. Odell and R. H. Gooding,
    Celestial Mechanics 38 (1986) 307-334

    Args:
        e: orbital eccentricity.
        M: mean anomaly. NOTE(review): ``E += M - m1`` at the end suggests
           M may be an angle-like object whose float() was taken into m1 —
           TODO confirm against callers.

    Returns:
        Eccentric anomaly after two refinement iterations.
    """

    # constants of the Odell & Gooding starter formula
    k1 = 3 * np.pi + 2
    k2 = np.pi - 1
    k3 = 6 * np.pi - 1
    A = 3 * k2 ** 2 / k1
    B = k3 ** 2 / (6 * k1)
    m1 = float(M)
    # initial guess for E, split by the magnitude/sign of the mean anomaly
    if abs(m1) < 1 / 6:
        E = m1 + e * (6 * m1) ** (1 / 3) - m1
    elif m1 < 0:
        w = np.pi + m1
        E = m1 + e * (A * w / (B - w) - np.pi - m1)
    else:
        w = np.pi - m1
        E = m1 + e * (np.pi - A * w / (B - w) - m1)
    e1 = 1 - e
    # choose the numerically safer evaluation of f/fd when e1 + E^2/6
    # is small (near-parabolic orbit with small E)
    risk_disabler = (e1 + E ** 2 / 6) >= 0.1
    # two iterations of a high-order (Halley-like) Newton refinement
    for i in range(2):
        fdd = e * sin(E)
        fddd = e * cos(E)
        if risk_disabler:
            f = (E - fdd) - m1
            fd = 1 - fddd
        else:
            # accurate evaluation of E - e*sin(E) via helper
            f = cls._e_e_sin_e(e, E) - m1
            s = sin(E / 2)
            fd = e1 + 2 * e * s ** 2
        dee = f * fd / (0.5 * f * fdd - fd ** 2)
        # higher-order correction terms
        w = fd + 0.5 * dee * (fdd + dee * fddd / 3)
        fd += dee * (fdd + 0.5 * dee * fddd)
        E -= (f - dee * (fd - w)) / fd
    # restore any offset lost in the float() conversion of M
    E += M - m1
    return E
|
def find_stages(document):
    """Find **stages** in document.

    Args:
        document (dict): validated spline document loaded from a yaml file.

    Returns:
        list: stages as a part of the spline document or an empty list if not given.

    >>> find_stages({'pipeline': [{'stage(Prepare)': 1}, {'stage(Build)': 1}, {'stage(Deploy)': 2}]})
    ['Prepare', 'Build', 'Deploy']
    """

    stage_names = []
    for entry in document.get('pipeline', []):
        # each entry is a dictionary with one key only
        key = next(iter(entry))
        if key.startswith("stage("):
            stage_names.append(key.replace('stage(', '').replace(')', ''))
    return stage_names
|
def batch_get(self, offset=0, count=50, status_list=None):
    """Query card (coupon) information in batches.

    Posts to the ``card/batchget`` endpoint; ``status_list`` is included
    only when it is truthy.
    """

    payload = {'offset': offset, 'count': count}
    if status_list:
        payload['status_list'] = status_list
    return self._post('card/batchget', data=payload)
|
def pipecmd(cmd1, cmd2):
    """Return output of "cmd1 | cmd2" as bytes."""

    first = subprocess.Popen(cmd1, stdout=subprocess.PIPE)
    second = subprocess.Popen(cmd2, stdin=first.stdout, stdout=subprocess.PIPE)
    # Drop our handle on first's stdout so first receives a SIGPIPE
    # if second exits early.
    first.stdout.close()
    return second.communicate()[0]
|
def apply_plugin_settings(self, options):
    """Apply configuration file's plugin settings.

    Args:
        options: collection of option names that changed; only the
            corresponding settings are pushed to each open client.
    """

    # naming convention below: *_n is the option name, *_o its current value
    font_n = 'plugin_font'
    font_o = self.get_plugin_font()
    help_n = 'connect_to_oi'
    help_o = CONF.get('help', 'connect/ipython_console')
    color_scheme_n = 'color_scheme_name'
    color_scheme_o = CONF.get('appearance', 'selected')
    show_time_n = 'show_elapsed_time'
    show_time_o = self.get_option(show_time_n)
    reset_namespace_n = 'show_reset_namespace_warning'
    reset_namespace_o = self.get_option(reset_namespace_n)
    ask_before_restart_n = 'ask_before_restart'
    ask_before_restart_o = self.get_option(ask_before_restart_n)
    # propagate each changed option to every open console client
    for client in self.clients:
        control = client.get_control()
        if font_n in options:
            client.set_font(font_o)
        # help connection needs a live control widget
        if help_n in options and control is not None:
            control.set_help_enabled(help_o)
        if color_scheme_n in options:
            client.set_color_scheme(color_scheme_o)
        if show_time_n in options:
            client.show_time_action.setChecked(show_time_o)
            client.set_elapsed_time_visible(show_time_o)
        if reset_namespace_n in options:
            client.reset_warning = reset_namespace_o
        if ask_before_restart_n in options:
            client.ask_before_restart = ask_before_restart_o
|
def accept(self):  # type: () -> str
    """The content-type for the response to the client.

    Returns:
        (str): The value of the 'Accept' header, unless it is missing or
        the wildcard type, in which case the user-supplied
        SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT default is returned.
    """

    requested = self.headers.get('Accept')
    if requested and requested != _content_types.ANY:
        return requested
    return self._default_accept
|
def reduce_to_2d(arr):
    """Given a np.ndarray with nDims > 2, reduce it to 2d.

    It does this by selecting the zeroth coordinate for every dimension
    greater than two.

    Args:
        arr: a numpy ndarray of dimension at least 2.

    Returns:
        A two-dimensional subarray from the input array.

    Raises:
        ValueError: If the argument is not a numpy ndarray, or the
            dimensionality is too low.
    """

    if not isinstance(arr, np.ndarray):
        raise ValueError('reduce_to_2d requires a numpy.ndarray')
    ndims = len(arr.shape)
    if ndims < 2:
        raise ValueError('reduce_to_2d requires an array of dimensionality >=2')
    # slice(None) is equivalent to `:`, so we take arr[0,0,...0,:,:].
    # The index must be a tuple: indexing with a plain list is deprecated
    # and removed in modern NumPy.
    slices = ([0] * (ndims - 2)) + [slice(None), slice(None)]
    return arr[tuple(slices)]
|
def get_arguments():
    """Parse and return the lupupy command-line arguments."""

    parser = argparse.ArgumentParser("Lupupy: Command Line Utility")
    parser.add_argument('-u', '--username', help='Username', required=False)
    parser.add_argument('-p', '--password', help='Password', required=False)
    parser.add_argument('--arm', help='Arm alarm to mode', required=False, default=False, action="store_true")
    parser.add_argument('-i', '--ip_address', help='IP of the Lupus panel', required=False)
    # remaining options are all simple boolean flags
    for flag, text in (
        ('--disarm', 'Disarm the alarm'),
        ('--home', 'Set to home mode'),
        ('--devices', 'Output all devices'),
        ('--history', 'Get the history'),
        ('--status', 'Get the status of the panel'),
        ('--debug', 'Enable debug logging'),
        ('--quiet', 'Output only warnings and errors'),
    ):
        parser.add_argument(flag, help=text, required=False, default=False, action="store_true")
    return parser.parse_args()
|
async def update(self):
    """Updates this interface's messages with the latest data.

    Concurrent calls are coalesced: if an update is already in progress,
    the call returns immediately instead of queueing another edit.
    """

    if self.update_lock.locked():
        return
    async with self.update_lock:
        # NOTE(review): update_lock appears to be a Semaphore (locked()
        # meaning "exhausted"); with a plain asyncio.Lock this branch
        # would always trigger — confirm the lock type at construction.
        if self.update_lock.locked():  # if this engagement has caused the semaphore to exhaust,
            # we are overloaded and need to calm down.
            await asyncio.sleep(1)
        if not self.message:  # too fast, stagger so this update gets through
            await asyncio.sleep(0.5)
        if not self.sent_page_reactions and self.page_count > 1:
            self.bot.loop.create_task(self.send_all_reactions())
            self.sent_page_reactions = True
            # don't spawn any more tasks
        await self.message.edit(**self.send_kwargs)
|
def validate_err_calc(val):
    """Validation function for the
    :attr:`psy_simple.plotter.FldmeanPlotter.err_calc` formatoption.

    Accepts a single percentile (float in [0, 100]), a pair of such
    percentiles, or a string containing ``'std'``.  Anything else that
    fails all three validators yields None.
    """

    # 1) a single percentile value
    try:
        fval = validate_float(val)
    except (ValueError, TypeError):
        pass
    else:
        if 0 <= fval <= 100:
            return fval
        raise ValueError("Percentiles for the error calculation must lie " "between 0 and 100, not %s" % fval)

    # 2) a pair of percentiles
    try:
        lval = ValidateList(float, 2)(val)
    except (ValueError, TypeError):
        pass
    else:
        if all(0 <= v <= 100 for v in lval):
            return lval
        raise ValueError("Percentiles for the error calculation must lie " "between 0 and 100, not %s" % lval)

    # 3) a string such as 'std'
    try:
        sval = validate_str(val)
    except ValueError:
        # not a string either; falls through and returns None (as before)
        pass
    else:
        if 'std' not in sval:
            raise ValueError('A string for the error calculation must contain std!')
        return sval
|
def check_regularizers(regularizers, keys):
    """Checks the given regularizers.

    This checks that `regularizers` is a dictionary that only contains keys in
    `keys`, and furthermore the entries in `regularizers` are functions or
    further dictionaries (the latter used, for example, in passing regularizers
    to modules inside modules) that must satisfy the same constraints.

    Args:
        regularizers: Dictionary of regularizers (allowing nested dictionaries)
            or None.
        keys: Iterable of valid keys for `regularizers`.

    Returns:
        Copy of checked dictionary of regularizers. If `regularizers=None`, an
        empty dictionary will be returned.

    Raises:
        KeyError: If a regularizer is provided for a key not in `keys`.
        TypeError: If a provided regularizer is not a callable function, or
            `regularizers` is not a Mapping.
    """

    if regularizers is None:
        return {}
    _assert_is_dictlike(regularizers, valid_keys=keys)
    keys = set(keys)
    if not set(regularizers) <= keys:
        # report all offending keys in a single error message
        extra_keys = set(regularizers) - keys
        raise KeyError("Invalid regularizer keys {}, regularizers can only " "be provided for {}".format(", ".join("'{}'".format(key) for key in extra_keys), ", ".join("'{}'".format(key) for key in keys)))
    # recursively verify every entry is callable (or a dict of callables)
    _check_nested_callables(regularizers, "Regularizer")
    # return a shallow copy so the caller's dict is not aliased
    return dict(regularizers)
|
def get_player_img(player_id):
    """Fetch a player's image from stats.nba.com.

    Downloads the 230x185 PNG for *player_id* into the current directory
    and returns it as a numpy array suitable for matplotlib plotting.

    Parameters
        player_id: int
            The player ID used to find the image.

    Returns
        player_img: ndarray
            The multidimensional numpy array of the player image.
    """

    url = "http://stats.nba.com/media/players/230x185/" + str(player_id) + ".png"
    img_file = str(player_id) + ".png"
    downloaded = urlretrieve(url, img_file)
    return plt.imread(downloaded[0])
|
def prior(self):
    """Model prior for this particular model.

    Product of eclipse probability (``self.prob``), the fraction of the
    scenario that is allowed by the various constraints
    (``self.selectfrac``), and all additional factors in
    ``self.priorfactors``.
    """

    result = self.prob * self.selectfrac
    for factor in self.priorfactors.values():
        result *= factor
    return result
|
def width_tuple(value):
    """Validate *value* as a width indicator for a sub-widget in a column.

    Accepted forms:
        ('fit', min, max): use the length actually needed for the content,
            padded to use at least width min, and cut off at width max.
            Here, min and max are positive integers or 0 to disable the
            boundary.
        ('weight', n): have a relative weight of n compared to other
            columns.  Here, n is an int.
        None: shorthand for ('fit', 0, 0).

    Returns:
        The normalized tuple.

    Raises:
        VdtTypeError: if *value* has none of the accepted forms.
    """

    if value is None:
        # Previously the code fell through and crashed on value[0];
        # None simply means "fit with no boundaries".
        return ('fit', 0, 0)
    if not isinstance(value, (list, tuple)) or value[0] not in ['fit', 'weight']:
        raise VdtTypeError(value)
    if value[0] == 'fit':
        if not isinstance(value[1], int) or not isinstance(value[2], int):
            # the raise keyword was missing here, letting bad types through
            raise VdtTypeError(value)
        return ('fit', int(value[1]), int(value[2]))
    if not isinstance(value[1], int):
        # same missing raise on the 'weight' branch
        raise VdtTypeError(value)
    return ('weight', int(value[1]))
|
def get_cluster_interfaces ( cluster , extra_cond = lambda nic : True ) :
    """Get the network interfaces names corresponding to a criteria .
    Note that the cluster is passed ( not the individual node names ) , thus it is
    assumed that all nodes in a cluster have the same interface names same
    configuration . In addition to ` ` extra _ cond ` ` , only the mountable and
    Ehernet interfaces are returned .
    Args :
        cluster ( str ) : the cluster to consider
        extra _ cond ( lambda ) : boolean lambda that takes the nic ( dict ) as
            parameter"""

    # NOTE ( msimonin ) : Since 05/18 nics on g5k nodes have predictable names but
    # the api description keep the legacy name ( device key ) and the new
    # predictable name ( key name ) . The legacy names is still used for api
    # request to the vlan endpoint This should be fixed in
    # https : / / intranet . grid5000 . fr / bugzilla / show _ bug . cgi ? id = 9272
    # When its fixed we should be able to only use the new predictable name .
    def _eligible ( nic ) :
        return ( nic [ 'mountable' ]
                 and nic [ 'interface' ] == 'Ethernet'
                 and not nic [ 'management' ]
                 and extra_cond ( nic ) )

    pairs = ( ( nic [ 'device' ] , nic [ 'name' ] )
              for nic in get_nics ( cluster ) if _eligible ( nic ) )
    return sorted ( pairs )
|
def plt_goea_results ( fout_img , goea_results , ** kws ) :
    """Plot a single page of GO enrichment results to *fout_img*."""
    goids = [ rec . GO for rec in goea_results ]
    id2term = { rec . GO : rec . goterm for rec in goea_results }
    subdag = GoSubDag ( goids , id2term , rcntobj = True )
    GoSubDagPlot ( subdag , goea_results = goea_results , ** kws ) . plt_dag ( fout_img )
|
def read_text ( self ) : # type : ( ) - > Dict [ str , str ]
    """Get version out of ad-hoc version.txt files.

    Missing files are silently skipped; for each existing file only the
    first line is read.

    :return: mapping of file path -> stripped first line
    """
    versions = { }
    existing = ( f for f in self . file_inventory . text_files if os . path . isfile ( f ) )
    for path in existing :
        with self . file_opener . open_this ( path , "r" ) as handle :
            versions [ path ] = handle . readline ( ) . strip ( " \n" )
    return versions
|
def parse_security_group ( self , global_params , region , security_group ) :
    """Parse a single Redshift security group.

    :param global_params: Parameters shared for all regions
    :param region: Name of the AWS region
    :param security_group: Security group (dict); mutated in place --
        the ``ClusterSecurityGroupName`` key is moved to ``name``.
    """
    name = security_group . pop ( 'ClusterSecurityGroupName' )
    security_group [ 'name' ] = name
    # Bug fix: index by the group's actual name, not the literal string
    # 'name' -- the original overwrote a single 'name' entry for every
    # security group parsed.
    self . security_groups [ name ] = security_group
|
def disable_svc_notifications ( self , service ) :
    """Disable notifications for a service
    Format of the line that triggers function call : :
    DISABLE _ SVC _ NOTIFICATIONS ; < host _ name > ; < service _ description >
    : param service : service to edit
    : type service : alignak . objects . service . Service
    : return : None"""

    # Guard clause: nothing to do when notifications are already disabled.
    if not service . notifications_enabled :
        return
    service . modified_attributes |= DICT_MODATTR [ "MODATTR_NOTIFICATIONS_ENABLED" ] . value
    service . notifications_enabled = False
    self . send_an_element ( service . get_update_status_brok ( ) )
|
def _get_mean ( self , imt , mag , hypo_depth , rrup , d ) :
"""Return mean value as defined in equation 3.5.1-1 page 148"""
|
# clip magnitude at 8.3 as per note at page 3-36 in table Table 3.3.2-6
# in " Technical Reports on National Seismic Hazard Maps for Japan "
mag = min ( mag , 8.3 )
if imt . name == 'PGV' :
mean = ( 0.58 * mag + 0.0038 * hypo_depth + d - 1.29 - np . log10 ( rrup + 0.0028 * 10 ** ( 0.5 * mag ) ) - 0.002 * rrup )
else :
mean = ( 0.50 * mag + 0.0043 * hypo_depth + d + 0.61 - np . log10 ( rrup + 0.0055 * 10 ** ( 0.5 * mag ) ) - 0.003 * rrup )
mean = np . log10 ( 10 ** ( mean ) / ( g * 100 ) )
return mean
|
def keys ( self ) :
    """Return a copy of the flat dictionary ' s list of keys .
    See the note for : meth : ` flatdict . FlatDict . items ` .
    : rtype : list"""

    result = [ ]
    for parent , child in self . _values . items ( ) :
        if not isinstance ( child , ( FlatDict , dict ) ) :
            result . append ( parent )
            continue
        # Flatten nested mappings into delimiter-joined keys; an empty
        # nested mapping contributes the parent key itself.
        flattened = [ self . _delimiter . join ( [ parent , sub ] ) for sub in child . keys ( ) ]
        result . extend ( flattened or [ parent ] )
    return sorted ( result )
|
def buildNavigation ( self ) :
    """Chooses the appropriate layout navigation component based on user prefs"""
    nav_pref = self . buildSpec [ 'navigation' ]
    if nav_pref == constants . TABBED :
        component = Tabbar ( self , self . buildSpec , self . configs )
    else :
        component = Sidebar ( self , self . buildSpec , self . configs )
    # A hidden navigation still gets built, just not shown.
    if nav_pref == constants . HIDDEN :
        component . Hide ( )
    return component
|
def _process ( self , data_buffer ) :
    """Handle incoming packet from server.

    ``data_buffer`` is a JSON document carrying at least a ``cmd`` key;
    ``out`` packets additionally carry a ``data`` string to be written to
    stdout. ``suspend`` and ``mode`` packets are currently no-ops here.
    """
    packet = json . loads ( data_buffer )
    if packet [ 'cmd' ] == 'out' : # Call os . write manually . In Python2.6 , sys . stdout . write doesn ' t use UTF - 8.
        # Save the current Windows console mode, then enable virtual
        # terminal (ANSI escape) processing so escape sequences embedded
        # in the payload render correctly while writing.
        original_mode = DWORD ( 0 )
        windll . kernel32 . GetConsoleMode ( self . _hconsole , byref ( original_mode ) )
        windll . kernel32 . SetConsoleMode ( self . _hconsole , DWORD ( ENABLE_PROCESSED_INPUT | ENABLE_VIRTUAL_TERMINAL_PROCESSING ) )
        try :
            os . write ( sys . stdout . fileno ( ) , packet [ 'data' ] . encode ( 'utf-8' ) )
        finally :
            # Always restore the saved console mode, even if the write fails.
            windll . kernel32 . SetConsoleMode ( self . _hconsole , original_mode )
    elif packet [ 'cmd' ] == 'suspend' : # Suspend client process to background .
        # NOTE(review): intentionally a no-op here -- presumably handled
        # elsewhere in the client; confirm before relying on it.
        pass
    elif packet [ 'cmd' ] == 'mode' :
        # No-op: mode-change packets are ignored in this handler.
        pass
|
def initialize_fields ( self , content ) :
    """Initializes the :class:`Field` elements in the `Array` with the
    *values* in the *content* list.

    If the *content* list is shorter than the `Array` then the *content*
    list is used as a rotating fill pattern for the :class:`Field` elements
    in the `Array`.

    :param list content: a list contains the :class:`Field` values for each
        element in the `Array` or one :class:`Field` value for all elements
        in the `Array`.
    """
    def _assign ( index , item , value ) :
        # Dispatch one value onto one array element (shared by both paths).
        if is_mixin ( item ) : # Container or Pointer
            item . initialize_fields ( value )
        elif is_field ( item ) : # Fields
            item . value = value
        else :
            raise MemberTypeError ( self , item , index )

    if isinstance ( content , ( list , tuple ) ) :
        capacity = len ( content )
        if not capacity :
            # Robustness: an empty pattern has nothing to fill with; the
            # original raised "range() arg 3 must not be zero" here.
            return
        # Use the content list as a rotating fill pattern.
        for i in range ( 0 , len ( self ) , capacity ) :
            for name , pair in enumerate ( zip ( self [ i : i + capacity ] , content ) , start = i ) :
                item , value = pair
                _assign ( name , item , value )
    else :
        # A single value fills every element.
        for name , item in enumerate ( self ) :
            _assign ( name , item , content )
|
def _prepare_load_balancers ( self ) :
    """Prepare load balancer variables"""
    stack_info = {
        A . NAME : self [ A . NAME ] ,
        A . VERSION : self [ A . VERSION ] ,
    }
    # Attach a fresh vars dict (referencing the stack info) to each balancer.
    for balancer in self . get ( R . LOAD_BALANCERS , [ ] ) :
        balancer [ A . loadbalancer . VARS ] = { A . STACK : stack_info }
|
def get_top_edge_depth ( self ) :
    """Return minimum depth of surface ' s top edge .
    : returns :
        Float value , the vertical distance between the earth surface
        and the shallowest point in surface ' s top edge in km ."""

    # The first mesh row is the surface's top edge; missing depths mean
    # the edge lies on the earth surface.
    top = self . mesh [ 0 : 1 ]
    return 0 if top . depths is None else numpy . min ( top . depths )
|
def extractFieldsFromResult ( data ) :
    '''Method that parses Infobel textual information to return a series of attributes .
    : return : a list of i3visio - like objects .'''

    # Ordered (field, regex) pairs; order determines output order.
    patterns = [
        ( "i3visio.fullname" , "<span class=\"fn\">([^<]*)</span>" ) ,
        ( "i3visio.name" , " por <strong>[^ ]* ([^<]*)</strong>" ) ,
        ( "i3visio.surname" , " por <strong>([^ ]*) " ) ,
        ( "i3visio.location.address" , "itemprop=\"streetAddress\">([^<]*)</span>" ) ,
        ( "i3visio.location.city" , "addressLocality\">([^<]*)</span>" ) ,
        ( "i3visio.location.postalcode" , "postalCode\">([^<]*)</span>" ) ,
        ( "i3visio.phone" , "document.write\('([0-9]+)'" ) ,
    ]
    entities = [ ]
    for fieldName , pattern in patterns :
        matches = re . findall ( pattern , data )
        if matches :
            # Keep only the first match; non-breaking spaces become spaces.
            entities . append ( {
                "type" : fieldName ,
                "value" : matches [ 0 ] . replace ( '\xa0' , ' ' ) ,
                "attributes" : [ ] ,
            } )
    return entities
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.