signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def add_settings(mod, allow_extras=True, settings=django_settings):
    """Add all settings that are part of ``mod`` to the global settings object.

    Special cases ``EXTRA_APPS`` to append the specified applications to the
    list of ``INSTALLED_APPS``.
    """
    extras = {}
    for setting in dir(mod):
        # Only ALL-CAPS names are treated as settings, per Django convention.
        if setting == setting.upper():
            setting_value = getattr(mod, setting)
            # In case the user forgot the comma: coerce a bare string into a
            # one-element tuple for settings that must be sequences.
            # (isinstance instead of the original `type(...) == str` check.)
            if setting in TUPLE_SETTINGS and isinstance(setting_value, str):
                setting_value = (setting_value,)
            # Any setting that starts with EXTRA_ and matches a setting that
            # is a list or tuple will automatically append the values to the
            # current setting.  It might make sense to make this less magical.
            if setting.startswith('EXTRA_'):
                base_setting = setting.split('EXTRA_', 1)[-1]
                if isinstance(getattr(settings, base_setting), (list, tuple)):
                    extras[base_setting] = setting_value
                    continue
            setattr(settings, setting, setting_value)
    # Append the collected extras, coercing to the type of the current value
    # so list + list / tuple + tuple concatenation is well-defined.
    for key, value in extras.items():
        curval = getattr(settings, key)
        setattr(settings, key, curval + type(curval)(value))
def get_input(source, files, threads=4, readtype="1D", combine="simple", names=None, barcoded=False):
    """Get input and process accordingly.

    Data can be:
    - an uncompressed, bgzip, bzip2 or gzip compressed fastq file
    - an uncompressed, bgzip, bzip2 or gzip compressed fasta file
    - a rich fastq containing additional key=value information in the
      description, as produced by MinKNOW and albacore, with the same
      compression options as above
    - a sorted bam file
    - a sorted cram file
    - a (compressed) sequencing_summary.txt file generated by albacore

    The handle is passed to the proper function to get a DataFrame with
    metrics. Multiple files of the same type can be used, processed in
    parallel.

    Arguments:
    - source: defines the input data type and the function to call
    - files: list of one or more files of type <source>
    - threads: number of workers that can be used
    - readtype: (only relevant for summary input) which columns to extract
    - combine: either 'simple' or 'track'; 'track' adds a field with the
      name of the dataset
    - names: if combine == "track", the names to use for the datasets.
      Needs the same length as files, or None.
    """
    proc_functions = {
        'fastq': ex.process_fastq_plain,
        'fasta': ex.process_fasta,
        'bam': ex.process_bam,
        'summary': ex.process_summary,
        'fastq_rich': ex.process_fastq_rich,
        'fastq_minimal': ex.process_fastq_minimal,
        'cram': ex.process_cram,
        'ubam': ex.process_ubam,
    }
    # One worker per file, remaining threads handed to each extraction call.
    filethreads = min(len(files), threads)
    threadsleft = threads - filethreads
    with cfutures.ProcessPoolExecutor(max_workers=filethreads) as executor:
        extraction_function = partial(proc_functions[source],
                                      threads=threadsleft,
                                      readtype=readtype,
                                      barcoded=barcoded)
        datadf = combine_dfs(dfs=list(executor.map(extraction_function, files)),
                             names=names or files,
                             method=combine)
    # Drop the readIDs column if any entries are missing.
    if "readIDs" in datadf and pd.isna(datadf["readIDs"]).any():
        datadf.drop("readIDs", axis='columns', inplace=True)
    datadf = calculate_start_time(datadf)
    logging.info("Nanoget: Gathered all metrics of {} reads".format(len(datadf)))
    if len(datadf) == 0:
        # BUG FIX: the original called .format(len(datadf)) on a message with
        # no placeholder — a pointless no-op call.
        logging.critical("Nanoget: no reads retrieved.")
        sys.exit("Fatal: No reads found in input.")
    else:
        return datadf
def _directory_prefix ( self ) :
"""Downloader options for specific directory""" | if self . downder == "wget" :
self . dir_prefix = "--directory-prefix="
elif self . downder == "aria2c" :
self . dir_prefix = "--dir=" |
def add_bgp_peering(self, bgp_peering, external_bgp_peer=None, network=None):
    """Add a BGP configuration to this routing interface.

    If the interface has multiple IP addresses, all networks will receive
    the BGP peering by default unless the ``network`` parameter is
    specified.

    Example of adding BGP to an interface by ID::

        interface = engine.routing.get(0)
        interface.add_bgp_peering(
            BGPPeering('mypeer'),
            ExternalBGPPeer('neighbor'))

    :param BGPPeering bgp_peering: BGP Peer element
    :param ExternalBGPPeer,Engine external_bgp_peer: peer element or href
    :param str network: if network specified, only add OSPF to this network
        on interface
    :raises ModificationAborted: Change must be made at the interface level
    :raises UpdateElementFailed: failed to add BGP
    :return: Status of whether the route table was updated
    :rtype: bool
    """
    if external_bgp_peer:
        destinations = [external_bgp_peer]
    else:
        destinations = []
    gateway_node = RoutingNodeGateway(bgp_peering, destinations=destinations)
    return self._add_gateway_node('bgp_peering', gateway_node, network)
def condition(self) -> bool:
    """Check the JWT, then check the session for validity."""
    jwt = JWT()
    if not jwt.verify_http_auth_token():
        return False
    session_id = jwt.data['session_id']
    # Unless FAST_SESSIONS is enabled, confirm the session still exists.
    if not current_app.config['AUTH']['FAST_SESSIONS']:
        if SessionModel.where_session_id(session_id) is None:
            return False
    Session.set_current_session(session_id)
    return True
def process_lpd(name, dir_tmp):
    """Open the json file, invoke doi_resolver, close the file, update the
    changelog, clean the directory, and make a new bag.

    :param str name: Name of current .lpd file
    :param str dir_tmp: Path to tmp directory
    :return none:
    """
    logger_doi_main.info("enter process_lpd")
    dir_root = os.getcwd()
    dir_bag = os.path.join(dir_tmp, name)
    dir_data = os.path.join(dir_bag, 'data')
    # Navigate down to the jsonld file: dir_root -> dir_data
    os.chdir(dir_data)
    # Open the jsonld file and read in the contents. Execute DOI Resolver.
    jld_data = read_json_from_file(os.path.join(dir_data, name + '.jsonld'))
    # Overwrite data with the resolved data.
    jld_data = DOIResolver(dir_root, name, jld_data).main()
    # Overwrite the file contents with the new data.
    # NOTE(review): no filename is passed here — presumably write_json_to_file
    # writes to a default file in the current working directory; confirm.
    write_json_to_file(jld_data)
    # Open changelog, timestamp it, prompt user for short description of
    # changes, close and save (currently disabled):
    # update_changelog()
    # Delete old bag files, and move files to bag root for re-bagging:
    # dir_data -> dir_bag
    dir_cleanup(dir_bag, dir_data)
    finish_bag(dir_bag)
    logger_doi_main.info("exit process_lpd")
    return
def genpass(pattern=r'[\w]{32}'):
    """Generate a password of random characters matching *pattern*."""
    try:
        password = rstr.xeger(pattern)
    except re.error as exc:
        # Surface an invalid regex pattern as a ValueError for callers.
        raise ValueError(str(exc))
    return password
def _check_users ( users ) :
'''Checks if the input dictionary of users is valid .''' | messg = ''
valid = True
for user , user_details in six . iteritems ( users ) :
if not user_details :
valid = False
messg += 'Please provide details for username {user}.\n' . format ( user = user )
continue
if not ( isinstance ( user_details . get ( 'level' ) , int ) or 0 <= user_details . get ( 'level' ) <= 15 ) : # warn !
messg += 'Level must be a integer between 0 and 15 for username {user}. Will assume 0.\n' . format ( user = user )
return valid , messg |
def scalar_names(self):
    """Return a list of scalar names for the dataset, making sure the active
    scalar's name comes first in the list."""
    point_data = self.GetPointData()
    cell_data = self.GetCellData()
    names = [point_data.GetArrayName(i)
             for i in range(point_data.GetNumberOfArrays())]
    names += [cell_data.GetArrayName(i)
              for i in range(cell_data.GetNumberOfArrays())]
    try:
        # Move the active scalar name to the front, if present.
        names.remove(self.active_scalar_name)
        names.insert(0, self.active_scalar_name)
    except ValueError:
        pass
    return names
def connect_host(kwargs=None, call=None):
    '''Connect the specified host system in this VMware environment

    CLI Example:

    .. code-block:: bash

        salt-cloud -f connect_host my-vmware-config host="myHostSystemName"
    '''
    # Only callable as a salt-cloud function (-f/--function).
    if call != 'function':
        raise SaltCloudSystemExit('The connect_host function must be called with ' '-f or --function.')
    host_name = kwargs.get('host') if kwargs and 'host' in kwargs else None
    if not host_name:
        raise SaltCloudSystemExit('You must specify name of the host system.')
    # Get the service instance
    si = _get_si()
    # Resolve the managed-object reference for the named host.
    host_ref = salt.utils.vmware.get_mor_by_property(si, vim.HostSystem, host_name)
    if not host_ref:
        raise SaltCloudSystemExit('Specified host system does not exist.')
    # Nothing to do when the host is already connected.
    if host_ref.runtime.connectionState == 'connected':
        return {host_name: 'host system already connected'}
    try:
        task = host_ref.ReconnectHost_Task()
        salt.utils.vmware.wait_for_task(task, host_name, 'connect host', 5, 'info')
    except Exception as exc:
        # Show the traceback if the debug logging level is enabled
        log.error('Error while connecting host %s: %s', host_name, exc,
                  exc_info_on_loglevel=logging.DEBUG)
        return {host_name: 'failed to connect host'}
    return {host_name: 'connected host'}
def embedded_object(self, embedded_object):
    """Setter method; for a description see the getter method."""
    # pylint: disable=attribute-defined-outside-init
    if embedded_object is False:
        self._embedded_object = None
        return
    self._embedded_object = _ensure_unicode(embedded_object)
def _get_pipe_name ( self ) :
"""Returns the pipe name to create a serial connection .
: returns : pipe path ( string )""" | if sys . platform . startswith ( "win" ) :
pipe_name = r"\\.\pipe\gns3_vmware\{}" . format ( self . id )
else :
pipe_name = os . path . join ( tempfile . gettempdir ( ) , "gns3_vmware" , "{}" . format ( self . id ) )
try :
os . makedirs ( os . path . dirname ( pipe_name ) , exist_ok = True )
except OSError as e :
raise VMwareError ( "Could not create the VMware pipe directory: {}" . format ( e ) )
return pipe_name |
def infer_alpha_chain(beta):
    """Given a parsed beta chain of a class II MHC, infer the most frequent
    corresponding alpha chain."""
    # Most common alpha pairing per beta-chain gene family. We really need
    # to change this logic to use a lookup table of pairwise frequencies
    # for inferring the alpha-beta pairing.
    pairings = (
        ("DRB", ("DRA1", "01", "01")),
        # Most common alpha chain for DP is DPA*01:03.
        ("DPB", ("DPA1", "01", "03")),
        # Most common DQ alpha (according to wikipedia): DQA1*01:02.
        ("DQB", ("DQA1", "01", "02")),
    )
    for prefix, (gene, family, code) in pairings:
        if beta.gene.startswith(prefix):
            return AlleleName(species="HLA",
                              gene=gene,
                              allele_family=family,
                              allele_code=code)
    return None
def list_domains(**kwargs):
    '''Return a list of available domains.

    :param connection: libvirt connection URI, overriding defaults

        .. versionadded:: 2019.2.0
    :param username: username to connect with, overriding defaults

        .. versionadded:: 2019.2.0
    :param password: password to connect with, overriding defaults

        .. versionadded:: 2019.2.0

    CLI Example:

    .. code-block:: bash

        salt '*' virt.list_domains
    '''
    conn = __get_conn(**kwargs)
    # Collect every domain name before closing the connection.
    vms = [dom.name() for dom in _get_domain(conn, iterable=True)]
    conn.close()
    return vms
def _get_y ( self , kwargs ) :
'''Returns y if it is explicitly defined in kwargs .
Otherwise , raises TypeError .''' | if 'y' in kwargs :
return round ( float ( kwargs [ 'y' ] ) , 6 )
elif self . _element_y in kwargs :
return round ( float ( kwargs [ self . _element_y ] ) , 6 )
elif self . _type == 3 and self . _element_1my in kwargs :
return round ( 1. - float ( kwargs [ self . _element_1my ] ) , 6 )
else :
raise TypeError ( ) |
def set_aliases_and_defaults(self, aliases_config=None, default_properties=None):
    """Set the alias config and defaults to use. Typically used when
    switching to a collection with a different schema.

    Args:
        aliases_config:
            An alias dict to use. Defaults to None, which means the default
            aliases defined in "aliases.json" is used. See constructor
            for format.
        default_properties:
            List of property names (strings) to use by default, if no
            properties are given to the 'properties' argument of query().
    """
    if aliases_config is None:
        # Fall back to the packaged aliases.json next to this module.
        with open(os.path.join(os.path.dirname(__file__), "aliases.json")) as f:
            aliases_config = json.load(f)
    self.aliases = aliases_config.get("aliases", {})
    self.default_criteria = aliases_config.get("defaults", {})
    # Set default properties.
    if default_properties is None:
        self._default_props = None
        self._default_prop_dict = None
    else:
        self._default_props, self._default_prop_dict = self._parse_properties(default_properties)
def _format_line(self, data, column=0, rel_line=1):
    'Formats a line from the data to be the appropriate length'
    length = len(data)
    if length > 140:
        if rel_line == 0:
            # Trim from the beginning.
            data = '... %s' % data[-140:]
        elif rel_line == 1:
            # Trim surrounding the error position.
            if column < 70:
                data = '%s ...' % data[:140]
            elif column > length - 70:
                data = '... %s' % data[-140:]
            else:
                data = '... %s ...' % data[column - 70:column + 70]
        elif rel_line == 2:
            # Trim from the end.
            data = '%s ...' % data[:140]
    return unicodehelper.decode(data)
def force_orthotropic(self):
    r"""Force an orthotropic laminate.

    The terms
    `A_{13}`, `A_{23}`, `A_{31}`, `A_{32}`,
    `B_{13}`, `B_{23}`, `B_{31}`, `B_{32}`,
    `D_{13}`, `D_{23}`, `D_{31}`, `D_{32}` are set to zero to force an
    orthotropic laminate.
    """
    if self.offset != 0.:
        raise RuntimeError('Laminates with offset cannot be forced orthotropic!')
    # Coupling slots in the individual 3x3 A, B and D matrices.
    coupling_3x3 = ((0, 2), (1, 2), (2, 0), (2, 1))
    for i, j in coupling_3x3:
        self.A[i, j] = 0.
        self.B[i, j] = 0.
        self.D[i, j] = 0.
    # Corresponding slots in the assembled 6x6 matrices.
    coupling_6x6 = (
        (0, 2), (1, 2), (2, 0), (2, 1),   # A16, A26, A61, A62
        (0, 5), (5, 0), (1, 5), (5, 1),   # B16, B61, B26, B62
        (3, 2), (2, 3), (4, 2), (2, 4),   # B16, B61, B26, B62
        (3, 5), (4, 5), (5, 3), (5, 4),   # D16, D26, D61, D62
    )
    for i, j in coupling_6x6:
        self.ABD[i, j] = 0.
        self.ABDE[i, j] = 0.
def as_sql(self, qn, connection=None):
    """Create the proper SQL fragment. This inserts something like
    "(T0.flags & value) != 0".

    This will be called by Where.as_sql()
    """
    # NOTE(review): the truthy branch emits a bitwise OR while the falsy
    # branch emits AND, yet the docstring only mentions "&". Presumably
    # `self.bit` encodes set-vs-unset lookup semantics — confirm against
    # the Bit class before touching these operators.
    if self.bit:
        return ("(%s.%s | %d)" % (qn(self.table_alias), qn(self.column), self.bit.mask), [])
    return ("(%s.%s & %d)" % (qn(self.table_alias), qn(self.column), self.bit.mask), [])
def add_part(self, part):
    """Add a partial pattern to the value.

    :param part: string or compiled pattern
    """
    # Normalise compiled patterns to their source string.
    if isinstance(part, RE_TYPE):
        part = part.pattern
    sep = self.separator
    # Allow U/spmething syntax: an empty '^$' pattern is replaced outright.
    if self == '^$':
        return URLPattern(part, sep)
    # Otherwise join the two, erasing any duplicate separator in between.
    return URLPattern(self.rstrip('$' + sep) + sep + part.lstrip(sep), sep)
def process_xml(self, xml):
    '''Parse Outcome Request data from XML.'''
    root = objectify.fromstring(xml)
    self.message_identifier = str(root.imsx_POXHeader.imsx_POXRequestHeaderInfo.imsx_messageIdentifier)
    # Each request type is optional in the body; a missing element raises
    # AttributeError from objectify, which means "not this operation".
    # BUG FIX: the original bare `except:` also swallowed unrelated errors
    # (including KeyboardInterrupt); catch only the expected exception.
    try:
        result = root.imsx_POXBody.replaceResultRequest
        self.operation = REPLACE_REQUEST
        # Get result sourced id from resultRecord
        self.lis_result_sourcedid = result.resultRecord.sourcedGUID.sourcedId
        self.score = str(result.resultRecord.result.resultScore.textString)
    except AttributeError:
        pass
    try:
        result = root.imsx_POXBody.deleteResultRequest
        self.operation = DELETE_REQUEST
        # Get result sourced id from resultRecord
        self.lis_result_sourcedid = result.resultRecord.sourcedGUID.sourcedId
    except AttributeError:
        pass
    try:
        result = root.imsx_POXBody.readResultRequest
        self.operation = READ_REQUEST
        # Get result sourced id from resultRecord
        self.lis_result_sourcedid = result.resultRecord.sourcedGUID.sourcedId
    except AttributeError:
        pass
def retrieve_csv_data(rows, row_header=0, column_header=0, limit_column=0):
    """Take the data from the rows.

    Skips the first ``column_header`` rows, then slices each remaining row
    to the columns ``row_header:limit_column``.

    NOTE(review): with the default ``limit_column=0`` every row slices to
    ``row[0:0]`` — an empty list. Presumably callers always pass an explicit
    ``limit_column``; confirm before relying on the default.
    """
    return [row[row_header:limit_column] for row in rows[column_header:]]
def insert(self, x1, x2, name='', referedObject=[]):
    """Insert the segment in its right place and return it.

    If there's already a segment S as S.x1 == x1 and S.x2 == x2, S.name will
    be changed to 'S.name U name' and the referedObject will be appended to
    the already existing list.
    """
    # NOTE(review): mutable default argument `referedObject=[]` is shared
    # across calls — confirm no caller relies on mutating the default.
    # Normalise so xx1 <= xx2 regardless of argument order.
    if x1 > x2:
        xx1, xx2 = x2, x1
    else:
        xx1, xx2 = x1, x2
    rt = None
    insertId = None
    childrenToRemove = []
    for i in range(len(self.children)):
        if self.children[i].x1 == xx1 and xx2 == self.children[i].x2:
            # Exact match: merge the names and append the refered object.
            self.children[i].name = self.children[i].name + ' U ' + name
            self.children[i].referedObject.append(referedObject)
            return self.children[i]
        if self.children[i].x1 <= xx1 and xx2 <= self.children[i].x2:
            # Fully contained in an existing child: recurse into it.
            return self.children[i].insert(x1, x2, name, referedObject)
        elif xx1 <= self.children[i].x1 and self.children[i].x2 <= xx2:
            # The new segment engulfs this child: create the new node once,
            # then adopt every engulfed child under it.
            if rt == None:
                # NOTE(review): types.ListType is Python 2 only; under
                # Python 3 this would need `isinstance(referedObject, list)`.
                if type(referedObject) is types.ListType:
                    rt = SegmentTree(xx1, xx2, name, referedObject, self, self.level + 1)
                else:
                    rt = SegmentTree(xx1, xx2, name, [referedObject], self, self.level + 1)
                insertId = i
            rt.__addChild(self.children[i])
            self.children[i].father = rt
            childrenToRemove.append(self.children[i])
        elif xx1 <= self.children[i].x1 and xx2 <= self.children[i].x2:
            # First child starting after the new segment: remember the
            # position so insertion keeps children ordered, and stop.
            insertId = i
            break
    if rt != None:
        # Attach the engulfing node and detach the children it adopted.
        self.__addChild(rt, insertId)
        for c in childrenToRemove:
            self.children.remove(c)
    else:
        # Disjoint segment: create a fresh leaf node.
        if type(referedObject) is types.ListType:
            rt = SegmentTree(xx1, xx2, name, referedObject, self, self.level + 1)
        else:
            rt = SegmentTree(xx1, xx2, name, [referedObject], self, self.level + 1)
        if insertId != None:
            self.__addChild(rt, insertId)
        else:
            self.__addChild(rt)
    return rt
def Get(self):
    """Return a GrrMessage instance from the transaction log or None."""
    try:
        with io.open(self.logfile, "rb") as fd:
            data = fd.read(self.max_log_size)
    except (IOError, OSError):
        # Missing or unreadable log: nothing to recover.
        return
    if not data:
        return
    try:
        return rdf_flows.GrrMessage.FromSerializedString(data)
    except (message.Error, rdfvalue.Error):
        # Corrupt log entry; treat as absent.
        return
def var_quadratic_sum(A, C, H, beta, x0):
    r"""Compute the expected discounted quadratic sum

    .. math::

        q(x_0) = \mathbb{E} \Big[ \sum_{t=0}^{\infty} \beta^t x_t' H x_t \Big]

    Here :math:`{x_t}` is the VAR process :math:`x_{t+1} = A x_t + C w_t`
    with :math:`{w_t}` standard normal and :math:`x_0` the initial condition.

    Parameters
    ----------
    A : array_like(float, ndim=2)
        The matrix described above in description. Should be n x n
    C : array_like(float, ndim=2)
        The matrix described above in description. Should be n x n
    H : array_like(float, ndim=2)
        The matrix described above in description. Should be n x n
    beta : scalar(float)
        Should take a value in (0, 1)
    x0 : array_like(float, ndim=1)
        The initial condition. A conformable array (of length n, or with
        n rows)

    Returns
    -------
    q0 : scalar(float)
        Represents the value :math:`q(x_0)`

    Remarks: The formula for computing :math:`q(x_0)` is
    :math:`q(x_0) = x_0' Q x_0 + v`
    where

    * :math:`Q` is the solution to :math:`Q = H + \beta A' Q A`, and
    * :math:`v = \frac{trace(C' Q C) \beta}{(1 - \beta)}`
    """
    # Make sure that A, C, H and x0 are array_like.
    A, C, H = (np.atleast_2d(m) for m in (A, C, H))
    x0 = np.atleast_1d(x0)
    # Solve Q = H + beta A'QA as a discrete Lyapunov equation with
    # coefficient sqrt(beta) * A'.
    Q = scipy.linalg.solve_discrete_lyapunov(np.sqrt(beta) * A.T, H)
    # Constant term from the noise: v = trace(C'QC) * beta / (1 - beta).
    v = np.trace(C.T @ Q @ C) * beta / (1 - beta)
    return x0.T @ Q @ x0 + v
def dump_to_log(self):
    """Write a summary to the logger with the name and number of times each
    event has been counted.

    May be called at any point in the process. Counts are not zeroed.
    """
    if not self._event_dict:
        logger.info('No Events')
        return
    logger.info('Events:')
    # Emit events in deterministic (sorted) order.
    for event_str, count_int in sorted(self._event_dict.items()):
        logger.info('  {}: {}'.format(event_str, count_int))
def route_table_delete(name, resource_group, **kwargs):
    '''.. versionadded:: 2019.2.0

    Delete a route table.

    :param name: The name of the route table to delete.
    :param resource_group: The resource group name assigned to the
        route table.

    CLI Example:

    .. code-block:: bash

        salt-call azurearm_network.route_table_delete test-rt-table testgroup
    '''
    result = False
    netconn = __utils__['azurearm.get_client']('network', **kwargs)
    try:
        # delete() starts the operation; wait() blocks until it completes.
        table = netconn.route_tables.delete(route_table_name=name, resource_group_name=resource_group)
        table.wait()
        result = True
    except CloudError as exc:
        # Log the cloud error and fall through to return False.
        __utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
    return result
def bootstrap_css():
    """Return HTML for Bootstrap CSS.

    Adjust url in settings. If no url is returned, we don't want this
    statement to return any HTML. This is intended behavior.

    Default value: ``None``

    This value is configurable, see Settings section

    **Tag name**::

        bootstrap_css

    **Usage**::

        {% bootstrap_css %}

    **Example**::

        {% bootstrap_css %}
    """
    rendered_urls = [render_link_tag(bootstrap_css_url())]
    # Only include the theme link when a theme URL is configured.
    if bootstrap_theme_url():
        rendered_urls.append(render_link_tag(bootstrap_theme_url()))
    # Join the list directly; the original wrapped it in a redundant
    # identity comprehension.
    return mark_safe("".join(rendered_urls))
def _pprint_features_dict(features_dict, indent=0, add_prefix=True):
    """Pretty-print tfds.features.FeaturesDict."""
    outer_indent = " " * indent
    inner_indent = " " * (indent + 4)
    prefix = outer_indent if add_prefix else ""
    lines = ["%s%s({" % (prefix, type(features_dict).__name__)]
    for key in sorted(features_dict.keys()):
        value = features_dict[key]
        # Nested feature dicts are rendered recursively, one level deeper,
        # without the leading indent (it is supplied by the enclosing line).
        if isinstance(value, tfds.features.FeaturesDict):
            rendered = _pprint_features_dict(value, indent + 4, False)
        else:
            rendered = str(value)
        lines.append("%s'%s': %s," % (inner_indent, key, rendered))
    lines.append("%s})" % outer_indent)
    return "\n".join(lines)
def chgid(name, gid):
    '''Change the gid for a named group

    CLI Example:

    .. code-block:: bash

        salt '*' group.chgid foo 4376
    '''
    if not isinstance(gid, int):
        raise SaltInvocationError('gid must be an integer')
    # NOTE(review): pre_gid is computed but never used.
    pre_gid = __salt__['file.group_to_gid'](name)
    pre_info = info(name)
    if not pre_info:
        raise CommandExecutionError('Group \'{0}\' does not exist'.format(name))
    # Nothing to do when the group already has the requested gid.
    if gid == pre_info['gid']:
        return True
    # NOTE(review): `gid` is placed in the argv list as an int — presumably
    # the cmd module stringifies arguments; confirm.
    cmd = ['dseditgroup', '-o', 'edit', '-i', gid, name]
    return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
def _expectation(p, kern, feat, none1, none2, nghp=None):
    """Compute the expectation:

        <K_{X, Z}>_p(X)

    - K_{.,.} :: RBF kernel

    :return: NxM
    """
    with params_as_tensors_for(kern, feat):
        # Use only active dimensions.
        Xcov = kern._slice_cov(p.cov)
        Z, Xmu = kern._slice(feat.Z, p.mu)
        D = tf.shape(Xmu)[1]
        if kern.ARD:
            lengthscales = kern.lengthscales
        else:
            # Broadcast the scalar lengthscale across all D dimensions.
            lengthscales = tf.zeros((D,), dtype=settings.float_type) + kern.lengthscales
        chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(lengthscales ** 2) + Xcov)  # NxDxD
        all_diffs = tf.transpose(Z) - tf.expand_dims(Xmu, 2)  # NxDxM
        # Squared Mahalanobis distance via a triangular solve with the
        # Cholesky factor of (diag(l^2) + Xcov).
        exponent_mahalanobis = tf.matrix_triangular_solve(chol_L_plus_Xcov, all_diffs, lower=True)  # NxDxM
        exponent_mahalanobis = tf.reduce_sum(tf.square(exponent_mahalanobis), 1)  # NxM
        exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis)  # NxM
        # Determinant ratio sqrt(|L|) / sqrt(|L + Xcov|), per data point,
        # computed from the lengthscales and the Cholesky diagonal.
        sqrt_det_L = tf.reduce_prod(lengthscales)
        sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1))
        determinants = sqrt_det_L / sqrt_det_L_plus_Xcov
        return kern.variance * (determinants[:, None] * exponent_mahalanobis)
def send(signal):
    """Send signal.

    The signal has a unique identifier that is computed from (1) the id
    of the actor or task sending this signal (i.e., the actor or task calling
    this function), and (2) an index that is incremented every time this
    source sends a signal. This index starts from 1.

    Args:
        signal: Signal to be sent.
    """
    worker = ray.worker.global_worker
    # Actors key their stream by actor id; plain tasks by the current task id.
    if hasattr(worker, "actor_creation_task_id"):
        source_key = worker.actor_id.hex()
    else:
        # No actors; this function must have been called from a task.
        source_key = worker.current_task_id.hex()
    encoded_signal = ray.utils.binary_to_hex(cloudpickle.dumps(signal))
    worker.redis_client.execute_command("XADD " + source_key + " * signal " + encoded_signal)
def next(self):
    """Return the next input from this input reader, a record.

    Returns:
        The next input from this input reader in the form of a record read
        from an LevelDB file.

    Raises:
        StopIteration: The ordered set records has been exhausted.
    """
    while True:
        if not hasattr(self, "_cur_handle") or self._cur_handle is None:
            # If there are no more files, StopIteration is raised here.
            self._cur_handle = super(GCSRecordInputReader, self).next()
        if not hasattr(self, "_record_reader") or self._record_reader is None:
            self._record_reader = records.RecordsReader(self._cur_handle)
        try:
            start_time = time.time()
            content = self._record_reader.read()
            self._slice_ctx.incr(self.COUNTER_IO_READ_BYTE, len(content))
            # BUG FIX: the elapsed time must be scaled to msec BEFORE
            # truncating; the original `int(elapsed) * 1000` reported 0 for
            # any read faster than one second.
            self._slice_ctx.incr(self.COUNTER_IO_READ_MSEC,
                                 int((time.time() - start_time) * 1000))
            return content
        except EOFError:
            # Current file exhausted; advance to the next one.
            self._cur_handle = None
            self._record_reader = None
def similarity(self, other):
    """Get similarity as a ratio of the two texts."""
    matcher = SequenceMatcher(a=self.value, b=other.value)
    return self.Similarity(matcher.ratio())
def check_lines(self, lines, i):
    """Check that lines have less than a maximum number of characters.

    Lines containing long URLs are ignored.
    """
    limit = self.config.max_line_length
    # Number physical lines starting at the given offset i.
    for lineno, line in enumerate(lines.splitlines(), start=i):
        if len(line) > limit and 'http://' not in line and 'https://' not in line:
            self.add_message('C0301', line=lineno, args=(len(line), limit))
def extract_objects(self, fname, type_filter=None):
    '''Extract objects from a source file.

    Args:
        fname (str): Name of file to read from
        type_filter (class, optional): Object class to filter results

    Returns:
        List of objects extracted from the file.
    '''
    if fname in self.object_cache:
        objects = self.object_cache[fname]
    else:
        # Parse the file once and cache the result for subsequent calls.
        with io.open(fname, 'rt', encoding='utf-8') as fh:
            objects = parse_verilog(fh.read())
        self.object_cache[fname] = objects
    if type_filter:
        objects = [o for o in objects if isinstance(o, type_filter)]
    return objects
def _Rforce(self, R, z, phi=0., t=0.):
    """NAME:

        _Rforce

    PURPOSE:

        evaluate the radial force for this potential

    INPUT:

        R - Galactocentric cylindrical radius

        z - vertical height

        phi - azimuth

        t - time

    OUTPUT:

        the radial force

    HISTORY:

        2011-04-10 - Written - Bovy (NYU)

        2018-10-18 - Updated for general object potential - James Lane (UofT)
    """
    # Cylindrical distance between the moving object and evaluation point.
    Rdist = _cylR(R, phi, self._orb.R(t), self._orb.phi(t))
    # Difference vector from the object to the evaluation point.
    (xd, yd, zd) = _cyldiff(self._orb.R(t), self._orb.phi(t), self._orb.z(t), R, phi, z)
    # Evaluate cylindrical radial force of the wrapped potential.
    RF = evaluateRforces(self._pot, Rdist, zd, use_physical=False)
    # Return R force, negative of radial vector to evaluation location,
    # projected onto the R direction at azimuth phi.
    return -RF * (nu.cos(phi) * xd + nu.sin(phi) * yd) / Rdist
def check_db(*names, **kwargs):
    '''.. versionadded:: 0.17.0

    Returns a dict containing the following information for each specified
    package:

    1. A key ``found``, which will be a boolean value denoting if a match
       was found in the package database.
    2. If ``found`` is ``False``, then a second key called ``suggestions``
       will be present, which will contain a list of possible matches. This
       list will be empty if the package name was specified in
       ``category/pkgname`` format, since the suggestions are only intended
       to disambiguate ambiguous package names (ones submitted without a
       category).

    CLI Examples:

    .. code-block:: bash

        salt '*' pkg.check_db <package1> <package2> <package3>
    '''
    ### NOTE: kwargs is not used here but needs to be present due to it being
    ### required in the check_db function in other package providers.
    ret = {}
    for name in names:
        if name in ret:
            log.warning('pkg.check_db: Duplicate package name \'%s\' submitted', name)
            continue
        entry = ret.setdefault(name, {})
        if '/' in name:
            # Fully-qualified category/pkgname: exact lookup, empty
            # suggestions when not found.
            entry['found'] = name in _allnodes()
            if entry['found'] is False:
                entry['suggestions'] = []
        else:
            # Ambiguous bare name: never "found", offer suggestions instead.
            entry['found'] = False
            entry['suggestions'] = porttree_matches(name)
    return ret
def getResourceValue(self, ep, res, cbfn="", noResp=False, cacheOnly=False):
    """Get value of a specific resource on a specific endpoint.

    :param str ep: name of endpoint
    :param str res: name of resource
    :param fnptr cbfn: Optional - callback function to be called on completion
    :param bool noResp: Optional - specify no response necessary from endpoint
    :param bool cacheOnly: Optional - get results from cache on connector, do not wake up endpoint
    :return: value of the resource, usually a string
    :rtype: asyncResult
    """
    q = {}
    # Set callback fn for use in async handler.
    result = asyncResult(callback=cbfn)
    result.endpoint = ep
    result.resource = res
    if noResp or cacheOnly:
        # Both flags are serialized whenever either one is set.
        q['noResp'] = 'true' if noResp == True else 'false'
        q['cacheOnly'] = 'true' if cacheOnly == True else 'false'
    # Make query.
    # NOTE(review): `ep` and `res` are concatenated without a '/' — presumably
    # `res` always starts with one; confirm against callers.
    data = self._getURL("/endpoints/" + ep + res, query=q)
    result.fill(data)
    if data.status_code == 200:
        # Immediate success: the value is available synchronously.
        result.error = False
        result.is_done = True
        if cbfn:
            cbfn(result)
        return result
    elif data.status_code == 202:
        # Deferred: register the result so the async handler can complete it
        # when the async-response arrives.
        self.database['async-responses'][json.loads(data.content)["async-response-id"]] = result
    else:
        # Fail: record the error code and raw payload.
        result.error = response_codes("resource", data.status_code)
        result.is_done = True
        result.raw_data = data.content
        result.status_code = data.status_code
    return result
def search(self, terms):
    """Search the registry for images matching ``terms``.

    :return: dict mapping image name -> image dict; empty on an invalid response
    """
    response = self._request_builder('GET', 'search', params={'q': terms})
    if not self._validate_response(response):
        return {}
    results = json.loads(response.content.decode('utf-8'))['results']
    return {entry['name']: entry for entry in results}
def do_eof(self, line):
    """Exit on system EOF character (Ctrl-D)."""
    # print an empty line so the prompt terminates cleanly before exiting
    d1_cli.impl.util.print_info("")
    self.do_exit(line)
def cleanup_environment():
    """Shutdown the ZEO server process running in another thread and clean up
    the temporary directory.
    """
    # ``global`` must appear before any use of TMP_PATH in this scope; the
    # original declared it after use, which is a SyntaxError in Python 3
    # ("name 'TMP_PATH' is used prior to global declaration").
    global TMP_PATH
    SERV.terminate()
    shutil.rmtree(TMP_PATH)
    if os.path.exists(TMP_PATH):
        # rmtree can leave the (now empty) root directory behind in rare cases
        os.rmdir(TMP_PATH)
    TMP_PATH = None
def parse_response(self, response, header=None):
    """Parse a raw response message into its data fields.

    The message layout is ``[prefix + header + header_sep] data [data_sep data ...]``.

    :param response: raw bytes received from the device
    :param header: expected header name; when given, it is verified and
        stripped before splitting
    :return: list of data fields
    :raises IEC60488.ParsingError: if the expected header is missing
    """
    text = response.decode(self.encoding)
    if header:
        expected = self.resp_prefix + header + self.resp_header_sep
        if not text.startswith(expected):
            raise IEC60488.ParsingError('Response header mismatch')
        text = text[len(expected):]
    return text.split(self.resp_data_sep)
def page(self, title: str, ns: Namespace = Namespace.MAIN, unquote: bool = False, ) -> 'WikipediaPage':
    """Construct a :class:`WikipediaPage` with title ``title``.

    Creating a ``WikipediaPage`` object is always the first step for
    extracting any information.

    Example::

        wiki_wiki = wikipediaapi.Wikipedia('en')
        page_py = wiki_wiki.page('Python_(programming_language)')
        print(page_py.title)
        # Python (programming language)

    :param title: page title as used in Wikipedia URL
    :param ns: :class:`Namespace`
    :param unquote: if true the title is URL-unquoted first
    :return: object representing :class:`WikipediaPage`
    """
    page_title = parse.unquote(title) if unquote else title
    return WikipediaPage(self, title=page_title, ns=ns, language=self.language)
def cli(obj, origin, tags, timeout, customer, delete):
    """Send or delete a heartbeat."""
    client = obj['client']
    if delete:
        # ``delete`` carries the heartbeat id to remove
        client.delete_heartbeat(delete)
    else:
        try:
            heartbeat = client.heartbeat(origin=origin, tags=tags, timeout=timeout, customer=customer)
        except Exception as e:
            # surface the API error to the user and exit non-zero
            click.echo('ERROR: {}'.format(e))
            sys.exit(1)
        click.echo(heartbeat.id)
def getInstance(cls, *args):
    '''Returns a singleton instance of the class.'''
    # The instance is created lazily on first call; ``args`` are only used
    # then -- subsequent calls ignore them.  ``__singleton`` is name-mangled
    # to the defining class.
    if not cls.__singleton:
        cls.__singleton = Heroku(*args)
    return cls.__singleton
def delete(self):
    """Remove the current SyncItem from the server's sync list."""
    url = SyncList.key.format(clientId=self.clientIdentifier)
    url += '/' + str(self.id)
    # issue an HTTP DELETE against the item's URL
    self._server.query(url, self._server._session.delete)
def _init_redis ( redis_spec ) :
"""Return a StrictRedis instance or None based on redis _ spec .
redis _ spec may be None , a Redis URL , or a StrictRedis instance""" | if not redis_spec :
return
if isinstance ( redis_spec , six . string_types ) :
return redis . StrictRedis . from_url ( redis_spec )
# assume any other value is a valid instance
return redis_spec |
def limit(self, limit):
    """Set the limit of this ListEmployeeWagesRequest.

    Maximum number of Employee Wages to return per page; must lie in the
    range 1..200 (the default is the maximum, 200).

    :param limit: The limit of this ListEmployeeWagesRequest.
    :type: int
    """
    # validate before storing: reject None first, then the two bounds
    if limit is None:
        raise ValueError("Invalid value for `limit`, must not be `None`")
    if limit > 200:
        raise ValueError("Invalid value for `limit`, must be a value less than or equal to `200`")
    if limit < 1:
        raise ValueError("Invalid value for `limit`, must be a value greater than or equal to `1`")
    self._limit = limit
def from_xdr_object(cls, op_xdr_object):
    """Creates a :class:`CreateAccount` object from an XDR Operation object."""
    if not op_xdr_object.sourceAccount:
        source = None
    else:
        # sourceAccount is an optional list; element 0 carries the ed25519 key
        source = encode_check('account', op_xdr_object.sourceAccount[0].ed25519).decode()
    destination = encode_check('account', op_xdr_object.body.createAccountOp.destination.ed25519).decode()
    # convert the raw XDR integer amount into the library's amount representation
    starting_balance = Operation.from_xdr_amount(op_xdr_object.body.createAccountOp.startingBalance)
    return cls(source=source, destination=destination, starting_balance=starting_balance, )
def mkdir(self, pathobj, _):
    """Create a remote directory at ``pathobj``.

    Note that this operation is not recursive.

    :raises RuntimeError: if the path is not absolute, or the PUT does not
        return HTTP 201
    :raises OSError: (errno 17) if the directory already exists
    """
    if not (pathobj.drive and pathobj.root):
        raise RuntimeError("Full path required: '%s'" % str(pathobj))
    if pathobj.exists():
        raise OSError(17, "File exists: '%s'" % str(pathobj))
    # a trailing slash tells the server to create a directory, not a file
    target = str(pathobj) + '/'
    text, code = self.rest_put(target, session=pathobj.session,
                               verify=pathobj.verify, cert=pathobj.cert)
    if code != 201:
        raise RuntimeError("%s %d" % (text, code))
def truncated_normal_log_likelihood(params, low, high, data):
    """Calculate the negative log likelihood of the truncated normal distribution.

    Args:
        params: tuple with (mean, std), the parameters under which we
            evaluate the model
        low (float): the lower truncation bound
        high (float): the upper truncation bound
        data (ndarray): one-dimensional array of data points for which we
            want to calculate the likelihood

    Returns:
        float: the negative log likelihood of observing the given data under
        the given parameters. This is meant to be used in minimization
        routines (hence +inf for invalid scale parameters).
    """
    mu = params[0]
    sigma = params[1]
    # reject any non-positive scale (the original only checked == 0, so a
    # negative sigma silently produced NaN instead of being rejected)
    if sigma <= 0:
        return np.inf
    ll = np.sum(norm.logpdf(data, mu, sigma))
    # normalization term for the truncation interval [low, high]
    ll -= len(data) * np.log((norm.cdf(high, mu, sigma) - norm.cdf(low, mu, sigma)))
    return -ll
def parser(self, lines):
    """Parse ``lines`` into a MOP header, consuming the parsed lines.

    :param lines: list of raw text lines; header lines are popped off the front
    :return: ``self``, once the first non-header line is reached
    :raises IOError: if the lines are exhausted before the header terminates
    """
    while len(lines) > 0:
        # A two-line keyword/value pair starts here: '## KEY' then '# value'.
        # Guard on len(lines): the original indexed lines[1] unconditionally
        # and raised IndexError when a lone '##' line was last.
        if lines[0].startswith('##') and len(lines) > 1 and lines[1].startswith('# '):
            self._header_append(lines.pop(0), lines.pop(0))
        elif lines[0].startswith('# '):
            # lines with single comments are exposure numbers unless
            # preceded by a double-comment line
            self._append_file_id(lines.pop(0))
        elif lines[0].startswith('##'):
            # double-comment lines without a single comment following are
            # column headers for the dataset
            self._set_column_names(lines.pop(0)[2:])
        else:
            # last line of the header reached
            return self
    raise IOError("Failed trying to read header")
def getallkeys(self, key, failobj=None):
    """Return the list of full key names (not the items) matching ``key``.

    The list holds a single entry for unambiguous matches and multiple
    entries for ambiguous ones; ``failobj`` is returned when nothing matches.
    """
    if self.mmkeys is None:
        # lazily build the minimum-match key table on first use
        self._mmInit()
    matches = self.mmkeys.get(key, failobj)
    return matches
def strategyKLogN(kls, n, k=4):
    """Return the directory names to preserve under the KLogN purge strategy."""
    assert k > 1
    keep = {n}
    step = 1  # invariant: step == k ** i for the current pass
    while step <= n:
        # keep k entries spaced 'step' apart, counting down from n
        keep.update(range(n, n - k * step, -step))
        step *= k
        # snap n down to a multiple of the next step size
        n -= n % step
    return {str(x) for x in keep if x >= 0}
def plot_discrete(self, show=False, annotations=True):
    """Plot the closed curves of the path.

    :param show: if True, call ``plt.show()`` before returning
    :param annotations: if True, also plot each entity that supports it
    :return: the matplotlib axis the curves were drawn on
    """
    import matplotlib.pyplot as plt
    axis = plt.axes()
    axis.set_aspect('equal', 'datalim')
    for i, points in enumerate(self.discrete):
        # root curves are drawn black ('k'), all others green ('g')
        color = ['g', 'k'][i in self.root]
        axis.plot(*points.T, color=color)
    if annotations:
        for e in self.entities:
            # skip entities that don't know how to draw themselves
            if not hasattr(e, 'plot'):
                continue
            e.plot(self.vertices)
    if show:
        plt.show()
    return axis
def gender(self, iso5218: bool = False, symbol: bool = False) -> Union[str, int]:
    """Return a random gender title, ISO 5218 code, or symbol.

    :param iso5218: return a code per ISO/IEC 5218, the international
        standard for representing human sexes
        (0 - not known, 1 - male, 2 - female, 9 - not applicable).
    :param symbol: return a gender symbol instead of a title.
    :return: title, code, or symbol of gender.

    :Example:
        Male
    """
    choice = self.random.choice
    if iso5218:
        return choice([0, 1, 2, 9])
    if symbol:
        return choice(GENDER_SYMBOLS)
    return choice(self._data['gender'])
def drop(n, it, constructor=list):
    """Skip the first ``n`` items of ``it`` and build the rest with ``constructor``.

    >>> first(10, drop(10, xrange(sys.maxint), iter))
    [10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
    """
    remainder = itertools.islice(it, n, None)
    return constructor(remainder)
def begin_table(self, column_count):
    """Open a ``<table>``, remembering ``column_count`` so the right number
    of cells can be created automatically when items are added to rows."""
    # reset the per-row bookkeeping before emitting the opening tag
    self.table_columns = column_count
    self.table_columns_left = 0
    self.write('<table>')
def del_edge(self, u_vertex, v_vertex):
    """Remove the edge ``u_vertex -> v_vertex`` from the graph if present.

    :param u_vertex: Vertex
    :param v_vertex: Vertex
    :return: ``True`` if an existing edge was removed, ``False`` otherwise.
    """
    if not self.is_edge(u_vertex, v_vertex):
        return False
    # keep the indegree bookkeeping consistent with the adjacency dict
    self.indegrees[v_vertex] -= 1
    self.adj_dict[u_vertex].remove(v_vertex)
    return True
def collect_parameters(uri_query='', body=[], headers=None,
                       exclude_oauth_signature=True, with_realm=False):
    """**Parameter Sources**

    Parameters starting with `oauth_` will be unescaped.

    Body parameters must be supplied as a dict, a list of 2-tuples, or a
    formencoded query string.

    Headers must be supplied as a dict.

    Per `section 3.4.1.3.1`_ of the spec.

    For example, the HTTP request::

        POST /request?b5=%3D%253D&a3=a&c%40=&a2=r%20b HTTP/1.1
        Host: example.com
        Content-Type: application/x-www-form-urlencoded
        Authorization: OAuth realm="Example",
            oauth_consumer_key="9djdj82h48djs9d2",
            oauth_token="kkk9d7dh3k39sjv7",
            oauth_signature_method="HMAC-SHA1",
            oauth_timestamp="137131201",
            oauth_nonce="7d8f3e4a",
            oauth_signature="djosJKDKJSD8743243%2Fjdk33klY%3D"

        c2&a3=2+q

    contains the following (fully decoded) parameters used in the
    signature base string::

        | Name                   | Value            |
        | b5                     | =%3D             |
        | a3                     | a                |
        | a2                     | r b              |
        | oauth_consumer_key     | 9djdj82h48djs9d2 |
        | oauth_token            | kkk9d7dh3k39sjv7 |
        | oauth_signature_method | HMAC-SHA1        |
        | oauth_timestamp        | 137131201        |
        | oauth_nonce            | 7d8f3e4a         |
        | c2                     |                  |
        | a3                     | 2 q              |

    Note that the value of "b5" is "=%3D" and not "==". Both "c@" and
    "c2" have empty values. While the encoding rules specified in this
    specification for the purpose of constructing the signature base
    string exclude the use of a "+" character (ASCII code 43) to
    represent an encoded space character (ASCII code 32), this practice
    is widely used in "application/x-www-form-urlencoded" encoded values,
    and MUST be properly decoded, as demonstrated by one of the "a3"
    parameter instances (the "a3" parameter is used twice in this
    request).

    .. _`section 3.4.1.3.1`: https://tools.ietf.org/html/rfc5849#section-3.4.1.3.1
    """
    headers = headers or {}
    params = []
    # The parameters from the following sources are collected into a single
    # list of name/value pairs:
    #
    # * The query component of the HTTP request URI as defined by
    #   `RFC3986, Section 3.4`_. The query component is parsed into a list
    #   of name/value pairs by treating it as an
    #   "application/x-www-form-urlencoded" string, separating the names
    #   and values and decoding them as defined by
    #   `W3C.REC-html40-19980424`_, Section 17.13.4.
    #
    # .. _`RFC3986, Section 3.4`: https://tools.ietf.org/html/rfc3986#section-3.4
    # .. _`W3C.REC-html40-19980424`: https://tools.ietf.org/html/rfc5849#ref-W3C.REC-html40-19980424
    if uri_query:
        params.extend(urldecode(uri_query))
    # * The OAuth HTTP "Authorization" header field (`Section 3.5.1`_) if
    #   present. The header's content is parsed into a list of name/value
    #   pairs excluding the "realm" parameter if present. The parameter
    #   values are decoded as defined by `Section 3.5.1`_.
    #
    # .. _`Section 3.5.1`: https://tools.ietf.org/html/rfc5849#section-3.5.1
    if headers:
        headers_lower = dict((k.lower(), v) for k, v in headers.items())
        authorization_header = headers_lower.get('authorization')
        if authorization_header is not None:
            params.extend([i for i in utils.parse_authorization_header(authorization_header) if with_realm or i[0] != 'realm'])
    # * The HTTP request entity-body, but only if all of the following
    #   conditions are met:
    #     * The entity-body is single-part.
    #     * The entity-body follows the encoding requirements of the
    #       "application/x-www-form-urlencoded" content-type as defined by
    #       `W3C.REC-html40-19980424`_.
    #     * The HTTP request entity-header includes the "Content-Type"
    #       header field set to "application/x-www-form-urlencoded".
    #
    # .. _`W3C.REC-html40-19980424`: https://tools.ietf.org/html/rfc5849#ref-W3C.REC-html40-19980424
    # TODO: enforce header param inclusion conditions
    bodyparams = extract_params(body) or []
    params.extend(bodyparams)
    # ensure all oauth params are unescaped
    unescaped_params = []
    for k, v in params:
        if k.startswith('oauth_'):
            v = utils.unescape(v)
        unescaped_params.append((k, v))
    # The "oauth_signature" parameter MUST be excluded from the signature
    # base string if present.
    if exclude_oauth_signature:
        unescaped_params = list(filter(lambda i: i[0] != 'oauth_signature', unescaped_params))
    return unescaped_params
def parse_metadata(section):
    """Given the first part of a slide, return its metadata as a dict.

    Each ``key: value`` line becomes one entry; lines without a colon are
    ignored, and only the first colon splits key from value.
    """
    metadata = {}
    for raw_line in section.split('\n'):
        key, sep, value = raw_line.partition(':')
        if sep:
            metadata[key.strip()] = value.strip()
    return metadata
def simxSetUIButtonLabel(clientID, uiHandle, uiButtonID, upStateLabel, downStateLabel, operationMode):
    '''Please have a look at the function description/documentation in the V-REP user manual'''
    # the underlying C binding expects byte strings; encode on Python 3 where
    # plain string literals are unicode
    if sys.version_info[0] == 3:
        if type(upStateLabel) is str:
            upStateLabel = upStateLabel.encode('utf-8')
        if type(downStateLabel) is str:
            downStateLabel = downStateLabel.encode('utf-8')
    return c_SetUIButtonLabel(clientID, uiHandle, uiButtonID, upStateLabel, downStateLabel, operationMode)
def _from_json(json_data):
    """Creates a BoundingBox from json data.

    :param json_data: the raw json data to parse -- a flat sequence of at
        least six numbers: (lower x, y, z, upper x, y, z)
    :type json_data: dict
    :returns: BoundingBox
    :raises USGSException: if fewer than six values are present
    """
    # NOTE(review): despite the declared dict type above, the numeric
    # indexing below implies a list/sequence -- confirm against callers
    if len(json_data) >= 6:
        # first three values form the lower corner, next three the upper one
        return BoundingBox(Coordinate(_parse_float(json_data[0]), _parse_float(json_data[1]), _parse_float(json_data[2])), Coordinate(_parse_float(json_data[3]), _parse_float(json_data[4]), _parse_float(json_data[5])))
    else:
        raise USGSException("The bounding box information was incomplete.")
def deploy(self, initial_instance_count, instance_type, accelerator_type=None, endpoint_name=None, update_endpoint=False, tags=None, kms_key=None):
    """Deploy this ``Model`` to an ``Endpoint`` and optionally return a ``Predictor``.

    Create a SageMaker ``Model`` and ``EndpointConfig``, and deploy an
    ``Endpoint`` from this ``Model``. If ``self.predictor_cls`` is not None,
    this method returns the result of invoking ``self.predictor_cls`` on the
    created endpoint name.

    The name of the created model is accessible in the ``name`` field of
    this ``Model`` after deploy returns. The name of the created endpoint is
    accessible in the ``endpoint_name`` field of this ``Model`` after deploy
    returns.

    Args:
        initial_instance_count (int): The initial number of instances to run
            in the ``Endpoint`` created from this ``Model``.
        instance_type (str): The EC2 instance type to deploy this Model to.
            For example, 'ml.p2.xlarge'.
        accelerator_type (str): Type of Elastic Inference accelerator to
            deploy this model for model loading and inference, for example,
            'ml.eia1.medium'. If not specified, no Elastic Inference
            accelerator will be attached to the endpoint. For more
            information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html
        endpoint_name (str): The name of the endpoint to create
            (default: None). If not specified, a unique endpoint name will
            be created.
        update_endpoint (bool): Flag to update the model in an existing
            Amazon SageMaker endpoint. If True, this will deploy a new
            EndpointConfig to an already existing endpoint and delete
            resources corresponding to the previous EndpointConfig. If
            False, a new endpoint will be created. Default: False
        tags (List[dict[str, str]]): The list of tags to attach to this
            specific endpoint.
        kms_key (str): The ARN of the KMS key that is used to encrypt the
            data on the storage volume attached to the instance hosting the
            endpoint.

    Returns:
        callable[string, sagemaker.session.Session] or None: Invocation of
        ``self.predictor_cls`` on the created endpoint name, if
        ``self.predictor_cls`` is not None. Otherwise, return None.
    """
    if not self.sagemaker_session:
        # 'local' instance types run against the local-mode session instead
        # of the real SageMaker service
        if instance_type in ('local', 'local_gpu'):
            self.sagemaker_session = local.LocalSession()
        else:
            self.sagemaker_session = session.Session()
    if self.role is None:
        raise ValueError("Role can not be null for deploying a model")
    # e.g. 'ml.p2.xlarge' -> 'ml-p2': suffix appended for compiled models
    compiled_model_suffix = '-'.join(instance_type.split('.')[:-1])
    if self._is_compiled_model:
        self.name += compiled_model_suffix
    self._create_sagemaker_model(instance_type, accelerator_type, tags)
    production_variant = sagemaker.production_variant(self.name, instance_type, initial_instance_count, accelerator_type=accelerator_type)
    if endpoint_name:
        self.endpoint_name = endpoint_name
    else:
        self.endpoint_name = self.name
    if self._is_compiled_model and not self.endpoint_name.endswith(compiled_model_suffix):
        self.endpoint_name += compiled_model_suffix
    if update_endpoint:
        # deploy a fresh EndpointConfig onto the already-existing endpoint
        endpoint_config_name = self.sagemaker_session.create_endpoint_config(name=self.name, model_name=self.name, initial_instance_count=initial_instance_count, instance_type=instance_type, accelerator_type=accelerator_type, tags=tags, kms_key=kms_key)
        self.sagemaker_session.update_endpoint(self.endpoint_name, endpoint_config_name)
    else:
        self.sagemaker_session.endpoint_from_production_variants(self.endpoint_name, [production_variant], tags, kms_key)
    if self.predictor_cls:
        return self.predictor_cls(self.endpoint_name, self.sagemaker_session)
def mv_connect_generators(mv_grid_district, graph, debug=False):
    """Connect MV generators to MV grid.

    Args:
        mv_grid_district: MVGridDistrictDing0
            MVGridDistrictDing0 object for which the connection process has
            to be done
        graph: :networkx:`NetworkX Graph Obj<>`
            NetworkX graph object with nodes
        debug: bool, defaults to False
            If True, information is printed during process.

    Returns:
        :networkx:`NetworkX Graph Obj<>`
            NetworkX graph object with nodes and newly created branches
    """
    generator_buffer_radius = cfg_ding0.get('mv_connect', 'generator_buffer_radius')
    generator_buffer_radius_inc = cfg_ding0.get('mv_connect', 'generator_buffer_radius_inc')
    # WGS84 (conformal) to ETRS (equidistant) projection
    proj1 = partial(pyproj.transform,
                    pyproj.Proj(init='epsg:4326'),  # source coordinate system
                    pyproj.Proj(init='epsg:3035'))  # destination coordinate system
    # ETRS (equidistant) to WGS84 (conformal) projection
    proj2 = partial(pyproj.transform,
                    pyproj.Proj(init='epsg:3035'),  # source coordinate system
                    pyproj.Proj(init='epsg:4326'))  # destination coordinate system
    # sorted() keeps the iteration (and thus the grid layout) deterministic
    for generator in sorted(mv_grid_district.mv_grid.generators(), key=lambda x: repr(x)):
        # ===== voltage level 4: generator has to be connected to MV station =====
        if generator.v_level == 4:
            mv_station = mv_grid_district.mv_grid.station()
            branch_length = calc_geo_dist_vincenty(generator, mv_station)
            # TODO: set branch type to something reasonable (to be calculated)
            branch_kind = mv_grid_district.mv_grid.default_branch_kind
            branch_type = mv_grid_district.mv_grid.default_branch_type
            branch = BranchDing0(length=branch_length, kind=branch_kind, type=branch_type, ring=None)
            graph.add_edge(generator, mv_station, branch=branch)
            if debug:
                logger.debug('Generator {0} was connected to {1}'.format(generator, mv_station))
        # ===== voltage level 5: generator has to be connected to MV grid (next-neighbor) =====
        elif generator.v_level == 5:
            generator_shp = transform(proj1, generator.geo_data)
            # get branches within the predefined radius `generator_buffer_radius`
            branches = calc_geo_branches_in_buffer(generator, mv_grid_district.mv_grid, generator_buffer_radius, generator_buffer_radius_inc, proj1)
            # calc distance between generator and grid's lines -> find nearest line
            conn_objects_min_stack = find_nearest_conn_objects(generator_shp, branches, proj1, conn_dist_weight=1, debug=debug, branches_only=False)
            # connect!
            # go through the stack (from nearest to most far connection target object)
            generator_connected = False
            for dist_min_obj in conn_objects_min_stack:
                # Note 1: conn_dist_ring_mod=0 to avoid re-routing of existent lines
                # Note 2: In connect_node(), the default cable/line type of grid is used. This is reasonable since
                #         the max. allowed power of the smallest possible cable/line type (3.64 MVA for overhead
                #         line of type 48-AL1/8-ST1A) exceeds the max. allowed power of a generator (4.5 MVA (dena))
                #         (if connected separately!)
                target_obj_result = connect_node(generator, generator_shp, mv_grid_district.mv_grid, dist_min_obj, proj2, graph, conn_dist_ring_mod=0, debug=debug)
                if target_obj_result is not None:
                    if debug:
                        logger.debug('Generator {0} was connected to {1}'.format(generator, target_obj_result))
                    generator_connected = True
                    break
            if not generator_connected and debug:
                logger.debug('Generator {0} could not be connected, try to ' 'increase the parameter `generator_buffer_radius` in ' 'config file `config_calc.cfg` to gain more possible ' 'connection points.'.format(generator))
    return graph
def prettify(elem):
    """Return a pretty-printed XML string for the Element."""
    # the docstring above was originally placed after the imports, so it was
    # not recognized as the function docstring (prettify.__doc__ was None)
    from xml.etree import ElementTree
    from xml.dom import minidom
    rough_string = ElementTree.tostring(elem, 'utf-8')
    reparsed = minidom.parseString(rough_string)
    return reparsed.toprettyxml(indent=" ")
def add_unit(unit, **kwargs):
    """Add the unit defined in the dict ``unit`` to the DB.

    If unit["project_id"] is None the unit is global, otherwise it is the
    property of a project. If the unit already exists, an exception is
    emitted.

    A minimal example:

    .. code-block:: python

        new_unit = dict(
            name = 'Teaspoons per second',
            abbreviation = 'tsp s^-1',
            cf = 0,               # Constant conversion factor
            lf = 1.47867648e-05,  # Linear conversion factor
            dimension_id = 2,
            description = 'A flow of one teaspoon per second.',
        )
        add_unit(new_unit)
    """
    new_unit = Unit()
    new_unit.dimension_id = unit["dimension_id"]
    new_unit.name = unit['name']
    # Needed to uniform abbr to abbreviation
    new_unit.abbreviation = unit['abbreviation']
    # Needed to uniform into to description
    new_unit.description = unit['description']
    new_unit.lf = unit['lf']
    new_unit.cf = unit['cf']
    if ('project_id' in unit) and (unit['project_id'] is not None):
        # unit belongs to a project -> adding to the "user" dimensions list
        new_unit.project_id = unit['project_id']
    # Save on DB
    db.DBSession.add(new_unit)
    db.DBSession.flush()
    return JSONObject(new_unit)
def add_user(username, password):
    """CLI parameter to add a user to the database.

    :param username:
    :param password:
    :return: JSON status output
    """
    user_model = Query()
    if db.search(user_model.username == username):
        return {'error': 'User {0} already exists'.format(username)}
    # random per-user salt; the password is stored only as a salted hash
    salt = hashlib.sha512(str(os.urandom(64)).encode('utf-8')).hexdigest()
    password = hash_password(password, salt)
    api_key = gen_api_key(username)
    user = {'username': username, 'password': password, 'salt': salt, 'api_key': api_key}
    user_id = db.insert(user)
    # NOTE(review): the response echoes the stored credential record
    # (hash + salt + api_key) back to the caller -- confirm this is intended
    return {'result': 'success', 'eid': user_id, 'user_created': user}
def uint8_3(self, val1, val2, val3):
    """Append a frame containing 3 uint8 values; returns self for chaining.

    :raises ValueError: if any value does not fit in an unsigned byte
    """
    try:
        frame = pack("BBB", val1, val2, val3)
    except struct.error:
        raise ValueError("Expected uint8")
    self.msg.append(frame)
    return self
def p_const_expression_floatnum(self, p):
    # NOTE: the string below is the PLY grammar rule for this production and
    # is read by the parser generator at build time -- do not edit it.
    'const _ expression : floatnumber'
    p[0] = FloatConst(p[1], lineno=p.lineno(1))
    # propagate the literal's line number to the production symbol
    p.set_lineno(0, p.lineno(1))
def post(self, endpoint, params=None, version='1.1', json_encoded=False):
    """Shortcut for POST requests via :class:`request`.

    :param endpoint: API endpoint to call
    :param params: request parameters, forwarded unchanged
    :param version: API version string
    :param json_encoded: whether to send the params JSON-encoded
    """
    return self.request(endpoint, 'POST', params=params, version=version, json_encoded=json_encoded)
def bytes_to_c_array(data):
    """Render ``data`` as a C character-array initializer, NUL-terminated."""
    quoted = ("'{}'".format(encode_escape(ch)) for ch in decode_escape(data))
    # the trailing ', 0' supplies the C string terminator
    return ', '.join(quoted) + ', 0'
def write_meta(self, role):
    """Write out a new meta file for ``role``."""
    # read the current meta file contents and feed them to the report generator
    meta_file = utils.file_to_string(self.paths["meta"])
    self.update_gen_report(role, "meta", meta_file)
def save(self, *args, **kwargs):
    '''Don't save any passed values related to a type of discount
    that is not the specified type.'''
    # null out the fields belonging to the discount types that are NOT selected
    if self.discountType != self.DiscountType.flatPrice:
        self.onlinePrice = None
        self.doorPrice = None
    if self.discountType != self.DiscountType.dollarDiscount:
        self.dollarDiscount = None
    if self.discountType != self.DiscountType.percentDiscount:
        self.percentDiscount = None
        self.percentUniversallyApplied = False
    super(DiscountCombo, self).save(*args, **kwargs)
def proper(self, x):
    """Clean a fitness vector in place: negative, NaN and infinite entries
    are replaced by ``self.max_fit``. Returns the (mutated) array."""
    invalid = (x < 0) | np.isnan(x) | np.isinf(x)
    x[invalid] = self.max_fit
    return x
def get_numeric_value(string_value):
    """Return only the number-like part of ``string_value``.

    Keeps digits plus '.', '+' and '-'; every other character is dropped.
    """
    keep = set('.+-')
    return ''.join(ch for ch in string_value if ch.isdigit() or ch in keep)
def _setup_configuration(self):
    """All steps are accepted as classes. Instantiate them with the right
    configuration and set them in a local property."""
    self.configuration = dict(schema_cls=self.schema_cls, allowed_actions=self.allowed_actions, filter_by_fields=self.filter_by_fields)
    # each pipeline step class is instantiated exactly once for this view;
    # only some of them need the schema class
    authentication = self.authentication_cls(schema_cls=self.schema_cls)
    authorization = self.authorization_cls()
    schema_validation = self.schema_validation_cls(schema_cls=self.schema_cls)
    view_actions = self.view_actions_cls()
    post_action_hooks = self.post_action_hooks_cls(schema_cls=self.schema_cls)
    response_converter = self.response_converter_cls(schema_cls=self.schema_cls)
    serializer = self.serializer_cls(schema_cls=self.schema_cls)
    data_cleaner = self.data_cleaner_cls(schema_cls=self.schema_cls)
    self.configuration.update(dict(authentication=authentication, authorization=authorization, schema_validation=schema_validation, view_actions=view_actions, post_action_hooks=post_action_hooks, response_converter=response_converter, data_cleaner=data_cleaner, serializer=serializer))
def refine(video, **kwargs):
    """Refine a video by searching `TheTVDB <http://thetvdb.com/>`_.

    .. note::
        This refiner only works for instances of :class:`~subliminal.video.Episode`.

    Several attributes can be found:

    * :attr:`~subliminal.video.Episode.series`
    * :attr:`~subliminal.video.Episode.year`
    * :attr:`~subliminal.video.Episode.series_imdb_id`
    * :attr:`~subliminal.video.Episode.series_tvdb_id`
    * :attr:`~subliminal.video.Episode.title`
    * :attr:`~subliminal.video.Video.imdb_id`
    * :attr:`~subliminal.video.Episode.tvdb_id`
    """
    # only deal with Episode videos
    if not isinstance(video, Episode):
        # fixed message: the original logged 'Cannot refine episodes' here,
        # which stated the opposite of the condition
        logger.error('Cannot refine non-episode videos')
        return
    # exit if the information is complete
    if video.series_tvdb_id and video.tvdb_id:
        logger.debug('No need to search')
        return
    # search the series
    logger.info('Searching series %r', video.series)
    results = search_series(video.series.lower())
    if not results:
        logger.warning('No results for series')
        return
    logger.debug('Found %d results', len(results))
    # search for exact matches
    matching_results = []
    for result in results:
        matching_result = {}
        # use seriesName and aliases
        series_names = [result['seriesName']]
        series_names.extend(result['aliases'])
        # parse the original series as series + year or country
        original_match = series_re.match(result['seriesName']).groupdict()
        # parse series year
        series_year = None
        if result['firstAired']:
            series_year = datetime.strptime(result['firstAired'], '%Y-%m-%d').year
        # discard mismatches on year
        if video.year and series_year and video.year != series_year:
            logger.debug('Discarding series %r mismatch on year %d', result['seriesName'], series_year)
            continue
        # iterate over series names
        for series_name in series_names:
            # parse as series and year
            series, year, country = series_re.match(series_name).groups()
            if year:
                year = int(year)
            # discard mismatches on year
            if year and (video.original_series or video.year != year):
                logger.debug('Discarding series name %r mismatch on year %d', series, year)
                continue
            # match on sanitized series name
            if sanitize(series) == sanitize(video.series):
                logger.debug('Found exact match on series %r', series_name)
                matching_result['match'] = {'series': original_match['series'], 'year': series_year, 'original_series': original_match['year'] is None}
                break
        # add the result on match
        if matching_result:
            matching_result['data'] = result
            matching_results.append(matching_result)
    # exit if we don't have exactly 1 matching result
    if not matching_results:
        logger.error('No matching series found')
        return
    if len(matching_results) > 1:
        logger.error('Multiple matches found')
        return
    # get the series
    matching_result = matching_results[0]
    series = get_series(matching_result['data']['id'])
    # add series information
    logger.debug('Found series %r', series)
    video.series = matching_result['match']['series']
    video.year = matching_result['match']['year']
    video.original_series = matching_result['match']['original_series']
    video.series_tvdb_id = series['id']
    video.series_imdb_id = series['imdbId'] or None
    # get the episode
    logger.info('Getting series episode %dx%d', video.season, video.episode)
    episode = get_series_episode(video.series_tvdb_id, video.season, video.episode)
    if not episode:
        logger.warning('No results for episode')
        return
    # add episode information
    logger.debug('Found episode %r', episode)
    video.tvdb_id = episode['id']
    video.title = episode['episodeName'] or None
    video.imdb_id = episode['imdbId'] or None
def _readMetaAndContent(self, fid):
    """Reads meta data and content from file into WordPressPost() class.

    The file must begin with ``key: value`` header lines terminated by a
    ``META_SEPARATOR`` line; everything after the separator is the post body.

    Returns the class.
    If error, returns None.
    """
    meta = {}
    # Read header lines until the --- meta separator is found.
    while True:
        line = fid.readline()
        if line.startswith(self.META_SEPARATOR):
            break
        key = line.split(':')[0].strip().lower()
        if key not in self.WP_META_KEYS:
            logger.error("wp: Token '%s' not in list of known tokens %s" % (key, self.WP_META_KEYS))
            return None
        # Split on the first ':' only so values may themselves contain colons.
        meta[key] = line.split(':', 1)[1].strip()
    if 'title' not in meta:
        # Previously this only printed a warning and then crashed with a
        # KeyError below; the documented contract is to return None on error.
        print("wp: A title: keyword is required!")
        return None
    meta['content'] = ''.join(fid.readlines())
    # Transfer over to a wordpress post class.
    post = WordPressPost()
    post.title = meta['title']
    post.content = meta['content']
    post.post_status = 'publish'
    post.terms_names = {}
    # dict.has_key() was removed in Python 3; use the 'in' operator.
    if 'tags' in meta:
        post.terms_names['post_tag'] = meta['tags'].split(',')
    if 'category' in meta:
        post.terms_names['category'] = meta['category'].split(',')
    return post
def set_editor_doc(self, doc, force_refresh=False):
    """Use the help plugin to show docstring dictionary computed
    with introspection plugin from the Editor plugin.

    :param doc: introspection result dict; 'obj_text' and 'docstring'
        keys are read directly here, the rest is consumed by the renderers.
    :param force_refresh: when True, bypass the pane lock and refresh anyway.
    """
    # Respect the pane lock unless the caller explicitly forces a refresh.
    if (self.locked and not force_refresh):
        return
    self.switch_to_editor_source()
    # Remember the last doc shown so it can be re-rendered later.
    self._last_editor_doc = doc
    self.object_edit.setText(doc['obj_text'])
    if self.rich_help:
        self.render_sphinx_doc(doc)
    else:
        self.set_plain_text(doc, is_code=False)
    # Block dockwidget signals around the (possible) raise of the help pane,
    # presumably so the raise does not trigger visibility handlers — the
    # bracketing order of these calls matters.
    if self.dockwidget is not None:
        self.dockwidget.blockSignals(True)
    self.__eventually_raise_help(doc['docstring'], force=force_refresh)
    if self.dockwidget is not None:
        self.dockwidget.blockSignals(False)
def copy(self, selection, smart_selection_adaption=True):
    """Copy every selected item onto the clipboard.

    Smart selection adaptation is applied unless explicitly disabled.

    :param selection: the current selection
    :param bool smart_selection_adaption: flag to enable smart selection adaptation mode
    :return:
    """
    assert isinstance(selection, Selection)
    self.__create_core_and_model_object_copies(selection, smart_selection_adaption)
def write_reg(self, filename):
    """Write a ds9 region file that represents this region as a set of diamonds.

    Every HEALPix pixel stored in ``self.pixeldict``, at every depth, is
    emitted as one fk5 polygon line whose corners are the pixel boundaries.

    Parameters
    ----------
    filename : str
        File to write
    """
    with open(filename, 'w') as out:
        for d in range(1, self.maxdepth + 1):
            for p in self.pixeldict[d]:
                line = "fk5; polygon("
                # the following int() gets around some problems with np.int64 that exist prior to numpy v 1.8.1
                vectors = list(zip(*hp.boundaries(2**d, int(p), step=1, nest=True)))
                positions = []
                for sky in self.vec2sky(np.array(vectors), degrees=True):
                    ra, dec = sky
                    # NOTE(review): ra is divided by 15 (degrees -> hours)
                    # before being labeled as degrees — presumably to get
                    # hour-angle formatting out of to_string; confirm against
                    # vec2sky's output units.
                    pos = SkyCoord(ra / 15, dec, unit=(u.degree, u.degree))
                    # Sexagesimal strings with 2 decimal places, ':'-separated.
                    positions.append(pos.ra.to_string(sep=':', precision=2))
                    positions.append(pos.dec.to_string(sep=':', precision=2))
                line += ','.join(positions)
                line += ")"
                print(line, file=out)
    return
async def _load_all(self):
    '''Load all the appointments from persistent storage.'''
    # Collect idens of corrupt entries here; deleting while iterating the
    # hive dict would mutate it mid-loop.
    to_delete = []
    for iden, val in self._hivedict.items():
        try:
            appt = _Appt.unpack(val)
            # The stored key must match the iden packed inside the value.
            if appt.iden != iden:
                raise s_exc.InconsistentStorage(mesg='iden inconsistency')
            self._addappt(iden, appt)
            self._next_indx = max(self._next_indx, appt.indx + 1)
        except (s_exc.InconsistentStorage, s_exc.BadStorageVersion, s_exc.BadTime, TypeError, KeyError, UnicodeDecodeError) as e:
            # A corrupt entry is logged and dropped rather than aborting load.
            logger.warning('Invalid appointment %r found in storage: %r. Removing.', iden, e)
            to_delete.append(iden)
            continue
    for iden in to_delete:
        await self._hivedict.pop(iden)
    # Make sure we don't assign the same index to 2 appointments
    if self.appts:
        maxindx = max(appt.indx for appt in self.appts.values())
        self._next_indx = maxindx + 1
def filter_values(column, default=None):
    """Gets a values for a particular filter as a list

    This is useful if:
        - you want to use a filter box to filter a query where the name of
          filter box column doesn't match the one in the select statement
        - you want to have the ability for filter inside the main query for
          speed purposes

    This searches for "filters" and "extra_filters" in form_data for a match.

    Usage example:
        SELECT action, count(*) as times
        FROM logs
        WHERE action in ({{ "'" + "','".join(filter_values('action_type')) + "'" }})
        GROUP BY 1

    :param column: column/filter name to lookup
    :type column: str
    :param default: default value to return if there's no matching columns
    :type default: str
    :return: returns a list of filter values
    :type: list
    """
    form_data = json.loads(request.form.get('form_data', '{}'))
    return_val = []
    for filter_type in ('filters', 'extra_filters'):
        for flt in form_data.get(filter_type, []):
            if flt['col'] == column:
                return_val.extend(flt['val'])
    if return_val:
        return return_val
    # Compare against None explicitly: previously `if default:` silently
    # discarded an explicitly supplied falsy default such as ''.
    if default is not None:
        return [default]
    return []
def get_hpkp_pin(cls, certificate: cryptography.x509.Certificate) -> str:
    """Generate the HTTP Public Key Pinning hash (RFC 7469) for the given certificate.

    The pin is the base64 encoding of the certificate's SHA-256 public key hash.
    """
    key_hash = cls.get_public_key_sha256(certificate)
    return b64encode(key_hash).decode('utf-8')
def import_app_credentials(filename=CREDENTIALS_FILENAME):
    """Import app credentials from configuration file.

    Parameters
        filename (str)
            Name of configuration file.

    Returns
        credentials (dict)
            All your app credentials and information
            imported from the configuration file.
    """
    with open(filename, 'r') as config_file:
        config = safe_load(config_file)
    client_id = config['client_id']
    client_secret = config['client_secret']
    redirect_url = config['redirect_url']
    # Bail out if any value was left at its placeholder default.
    if any(value in DEFAULT_CONFIG_VALUES for value in (client_id, client_secret, redirect_url)):
        exit('Missing credentials in {}'.format(filename))
    return {
        'client_id': client_id,
        'client_secret': client_secret,
        'redirect_url': redirect_url,
        'scopes': set(config['scopes']),
    }
def calculate_auc_covar(auc_structure1, auc_structure2):
    """determine AUC covariance due to actives (covar_a) and decoys (covar_d)

    :param auc_structure1: list [(id, best_score, best_query, status, fpf, tpf), ...,]
    :param auc_structure2: list [(id, best_score, best_query, status, fpf, tpf), ...,]
    :return (covar_a, covar_d): tuple
    """
    # Partition both structures by activity class.
    actives1, decoys1 = splitter(auc_structure1)
    actives2, decoys2 = splitter(auc_structure2)
    # Covariance due to actives: off-diagonal entry of the 2x2 covariance
    # matrix over the fpf columns (tuple index 4) of the two active sets.
    covar_a = np.cov([row[4] for row in actives1],
                     [row[4] for row in actives2])[0][1]
    # Covariance due to decoys: same computation over the tpf columns
    # (tuple index 5) of the two decoy sets.
    covar_d = np.cov([row[5] for row in decoys1],
                     [row[5] for row in decoys2])[0][1]
    return covar_a, covar_d
def get_ticker(self, symbol=None):
    """Get symbol tick

    https://docs.kucoin.com/#get-ticker

    :param symbol: (optional) Name of symbol e.g. KCS-BTC
    :type symbol: string

    .. code:: python

        all_ticks = client.get_ticker()
        ticker = client.get_ticker('ETH-BTC')

    :returns: ApiResponse with fields such as sequence, price (last trade
        price), size (last trade size), bestBid/bestBidSize and
        bestAsk/bestAskSize.

    :raises: KucoinResponseException, KucoinAPIException
    """
    # Without a symbol, return the tick of every market in one call.
    if symbol is None:
        return self._get('market/allTickers', False, data={})
    # With a symbol, query the level-1 order book for that market only.
    return self._get('market/orderbook/level1', False, data={'symbol': symbol})
def parse(desktop_file_or_string):
    '''Parse a .desktop file.

    Parse a .desktop file or a string with its contents into an easy-to-use
    dict, with standard values present even if not defined in file.

    Args:
        desktop_file_or_string (str): Either the path to a .desktop file or a
            string with a .desktop file as its contents.

    Returns:
        dict: A dictionary of the parsed file.
    '''
    if os.path.isfile(desktop_file_or_string):
        with open(desktop_file_or_string) as f:
            desktop_file = f.read()
    else:
        desktop_file = desktop_file_or_string
    result = {}
    for line in desktop_file.split('\n'):
        if '=' in line:
            # Split on the FIRST '=' only: values such as
            # "Exec=env VAR=1 app" legitimately contain '=' and were
            # previously truncated at the second '='.
            key, _, value = line.partition('=')
            result[key] = value
    # Convert the literal booleans used by the spec.
    for key, value in result.items():
        if value == 'false':
            result[key] = False
        elif value == 'true':
            result[key] = True
    # Guarantee the standard keys exist even when absent from the file.
    if 'Terminal' not in result:
        result['Terminal'] = False
    if 'Hidden' not in result:
        result['Hidden'] = False
    return result
def stats(self, shp, stats='mean', add_stats=None, raster_out=True, *args, **kwargs):
    '''Compute raster statistics for a given geometry in shape, where shape is either
    a GeoPandas DataFrame, shapefile, or some other geometry format used by
    python-raster-stats. Runs python-raster-stats in background
    (additional help and info can be found there)

    Returns dataframe with statistics and clipped raster

    Usage:
        df = geo.stats(shape, stats=stats, add_stats=add_stats)

    where:
        raster_out: If True (Default), returns clipped Georasters
    '''
    # Run zonal statistics; with geojson_out=True each row's 'properties'
    # holds the input attributes, the computed stats and the
    # mini_raster_* entries describing the clipped raster.
    df = pd.DataFrame(zonal_stats(shp, self.raster, nodata=self.nodata_value, all_touched=True, raster_out=raster_out, affine=Affine.from_gdal(*self.geot), geojson_out=True, stats=stats, add_stats=add_stats))
    # Wrap each clipped mini raster in a GeoRaster carrying this raster's
    # projection and datatype.
    df['GeoRaster'] = df.properties.apply(lambda x: GeoRaster(x['mini_raster_array'], Affine.to_gdal(x['mini_raster_affine']), nodata_value=x['mini_raster_nodata'], projection=self.projection, datatype=self.datatype))
    # Columns added by zonal_stats, i.e. not already present on shp.
    statcols = list(set([i for i in df.properties[0].keys()]).difference(set(shp.columns)))
    cols = shp.columns.tolist() + statcols
    # Drop the geometry and the intermediate mini_raster_* columns.
    cols = [i for i in cols if i != 'geometry' and i.find('mini_raster') == - 1]
    # Rebuild a flat frame from the nested 'properties' dicts.
    df2 = pd.DataFrame([df.properties.apply(lambda x: x[i]) for i in cols]).T
    df2.columns = cols
    df2 = df2.merge(df[['id', 'GeoRaster']], left_index=True, right_index=True)
    df2.set_index('id', inplace=True)
    return df2
def isItemAllowed(self, obj):
    """Returns true if the current analysis to be rendered has a slot
    assigned for the current layout.

    :param obj: analysis to be rendered as a row in the list
    :type obj: ATContentType/DexterityContentType
    :return: True if the obj has an slot assigned. Otherwise, False.
    :rtype: bool
    """
    uid = api.get_uid(obj)
    slot = self.get_item_slot(uid)
    if slot:
        # Slot present: defer the final decision to the base view.
        return BaseView.isItemAllowed(self, obj)
    logger.warning("Slot not assigned to item %s" % uid)
    return False
def parse_storage_size(storage_size):
    """Parses an expression that represents an amount of storage/memory and returns the number of bytes it represents.

    Args:
        storage_size (str): Size in bytes. The units ``k`` (kibibytes), ``m`` (mebibytes) and ``g``
            (gibibytes) are supported, i.e. a ``partition_size`` of ``1g`` equates 2**30 bytes.

    Returns:
        int: Number of bytes.

    Raises:
        ValueError: If the expression cannot be parsed.
    """
    size_re = re.compile(r'^([0-9]+(\.[0-9]+)?)([gmk])?$', re.I)
    match = size_re.fullmatch(str(storage_size))
    if match is None:
        raise ValueError('Invalid partition size: {0}'.format(storage_size))
    number, _, unit = match.groups()
    multipliers = {'k': 1024, 'm': 1024 ** 2, 'g': 1024 ** 3}
    # A missing unit means plain bytes; fractional bytes are silently
    # truncated because byte is the smallest unit.
    factor = multipliers[unit.lower()] if unit else 1
    return int(float(number) * factor)
def regular_index(*dfs):
    """Change & restore the indices of dataframes

    Dataframe with duplicate values can be hard to work with.
    When split and recombined, you cannot restore the row order.
    This can be the case even if the index has unique but
    irregular/unordered. This contextmanager resets the unordered
    indices of any dataframe passed to it, on exit it restores
    the original index.

    A regular index is of the form::

        RangeIndex(start=0, stop=n, step=1)

    Parameters
    ----------
    dfs : tuple
        Dataframes

    Yields
    ------
    dfs : tuple
        Dataframe

    Examples
    --------
    Create dataframes with different indices

    >>> df1 = pd.DataFrame([4, 3, 2, 1])
    >>> df2 = pd.DataFrame([3, 2, 1], index=[3, 0, 0])
    >>> df3 = pd.DataFrame([11, 12, 13], index=[11, 12, 13])

    Within the contexmanager all frames have nice range indices

    >>> with regular_index(df1, df2, df3):
    ...     print(df1.index)
    ...     print(df2.index)
    ...     print(df3.index)
    RangeIndex(start=0, stop=4, step=1)
    RangeIndex(start=0, stop=3, step=1)
    RangeIndex(start=0, stop=3, step=1)

    Indices restored

    >>> df1.index
    RangeIndex(start=0, stop=4, step=1)
    >>> df2.index
    Int64Index([3, 0, 0], dtype='int64')
    >>> df3.index
    Int64Index([11, 12, 13], dtype='int64')
    """
    def _is_regular(index):
        # The documented contract: RangeIndex(start=0, stop=n, step=1).
        # A bare isinstance check (as before) wrongly accepted e.g.
        # RangeIndex(start=5) and left it un-reset.
        return (isinstance(index, pd.RangeIndex)
                and index.start == 0
                and index.step == 1)

    original_index = [df.index for df in dfs]
    have_bad_index = [not _is_regular(df.index) for df in dfs]
    for df, bad in zip(dfs, have_bad_index):
        if bad:
            df.reset_index(drop=True, inplace=True)
    try:
        yield dfs
    finally:
        # Only restore when the length is unchanged; otherwise the old
        # index no longer fits the (split/recombined) frame.
        for df, bad, idx in zip(dfs, have_bad_index, original_index):
            if bad and len(df.index) == len(idx):
                df.index = idx
def unpack(self, packed_value, major=DEFAULT_KATCP_MAJOR):
    """Parse a KATCP parameter into an object.

    Parameters
    ----------
    packed_value : str
        The unescaped KATCP string to parse into a value.
    major : int, optional
        Major version of KATCP to use when interpreting types.
        Defaults to latest implemented KATCP version.

    Returns
    -------
    value : object
        The value the KATCP string represented.
    """
    if packed_value is None:
        # Nothing on the wire: fall back to this type's default value.
        value = self.get_default()
    else:
        # The previous try/except that immediately re-raised was a no-op;
        # decode errors propagate to the caller exactly as before.
        value = self.decode(packed_value, major)
    # A None value (e.g. no default configured) is not validated.
    if value is not None:
        self.check(value, major)
    return value
def plot_every_step(sdat, lovs):
    """Plot profiles at each time step.

    Args:
        sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance.
        lovs (nested list of str): nested list of profile names such as
            the one produced by :func:`stagpy.misc.list_of_vars`.
    Other Parameters:
        conf.core.snapshots: the slice of snapshots.
        conf.conf.timesteps: the slice of timesteps.
    """
    sovs = misc.set_of_vars(lovs)
    for step in sdat.walk.filter(rprof=True):
        rprofs = {}
        rads = {}
        metas = {}
        for rvar in sovs:
            rprof, rad, meta = get_rprof(step, rvar)
            rprofs[rvar] = rprof
            metas[rvar] = meta
            if rad is not None:
                rads[rvar] = rad
        # Scale the radial bounds to meters.  (A redundant unscaled
        # assignment of rprofs['bounds'] that was immediately overwritten
        # has been removed.)
        rcmb, rsurf = misc.get_rbounds(step)
        rprofs['bounds'] = (step.sdat.scale(rcmb, 'm')[0], step.sdat.scale(rsurf, 'm')[0])
        # Radial coordinate, offset so it starts at the core-mantle boundary.
        rprofs['r'] = get_rprof(step, 'r')[0] + rprofs['bounds'][0]
        stepstr = str(step.istep)
        _plot_rprof_list(sdat, lovs, rprofs, metas, stepstr, rads)
def sorted_bits(self) -> List[Tuple[str, int]]:
    """Return the (name, position) bit items ordered by bit position."""
    def _position(item):
        return item[1]
    return sorted(self.bit.items(), key=_position)
def inplace_reload(method):
    """Executes the wrapped function and reloads the object
    with data returned from the server.

    Unless the caller passes ``inplace=False``, the server response is
    copied back onto ``obj`` and ``obj`` itself is returned; otherwise
    the API object returned by the server is returned as-is.
    """
    # Local import keeps this decorator self-contained.
    import functools

    # noinspection PyProtectedMember
    @functools.wraps(method)  # preserve the wrapped method's metadata
    def wrapped(obj, *args, **kwargs):
        # A missing 'inplace' kwarg (None) defaults to in-place behaviour.
        in_place = kwargs.get('inplace') in (True, None)
        api_object = method(obj, *args, **kwargs)
        if in_place and api_object:
            # Refresh the original object from the server response.
            obj._data = api_object._data
            obj._dirty = api_object._dirty
            obj._data.fetched = False
            return obj
        elif api_object:
            return api_object
        else:
            return obj
    return wrapped
async def stop(self, **kwargs):
    """Stop pairing process.

    Finishes the pairing procedure using the previously supplied PIN and
    stores the resulting credentials on the service.
    """
    # A PIN must have been set before pairing can be completed.
    if not self._pin_code:
        raise Exception('no pin given')
    # TODO: new exception
    self.service.device_credentials = await self.pairing_procedure.finish_pairing(self._pin_code)
def changeTo(self, path):
    '''change value

    Args:
        path (str): the new environment path
    '''
    # Wrap the path string in the plist value-object hierarchy:
    # PATH -> string single -> key/value pair -> dict single.
    path_string = StringSingle(path)
    path_pair = Pair('PATH', path_string)
    self.value = [DictSingle(path_pair)]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.