signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def to_internal_filter(self, attribute_profile, external_attribute_names):
    """Convert attribute names from an external profile to the internal format.

    :type attribute_profile: str
    :type external_attribute_names: list[str]
    :rtype: list[str]

    :param attribute_profile: which external profile to convert from (e.g. 'oidc', 'saml', ...)
    :param external_attribute_names: a list of external attribute names
    :return: a list of attribute names in the internal format
    """
    try:
        profile_mapping = self.to_internal_attributes[attribute_profile]
    except KeyError:
        # logger.warn() is deprecated since Python 3.3; use logger.warning()
        logger.warning("no attribute mapping found for the given attribute profile '%s'",
                       attribute_profile)
        # no attributes since the given profile is not configured
        return []

    # use a set to ensure only unique values
    internal_attribute_names = set()
    for external_attribute_name in external_attribute_names:
        try:
            internal_attribute_names.add(profile_mapping[external_attribute_name])
        except KeyError:
            # unknown external attribute names are silently skipped
            pass
    return list(internal_attribute_names)
def imap(requests, stream=False, size=2, exception_handler=None):
    """Concurrently convert a generator of Requests into a generator of Responses.

    :param requests: a generator of Request objects.
    :param stream: if True, response content is not downloaded immediately.
    :param size: number of requests to issue at a time; default is 2.
    :param exception_handler: callback invoked on failure; receives (Request, Exception).
    """
    pool = Pool(size)

    def sender(req):
        return req.send(stream=stream)

    for completed in pool.imap_unordered(sender, requests):
        if completed.response is not None:
            yield completed.response
        elif exception_handler:
            handled = exception_handler(completed, completed.exception)
            if handled is not None:
                yield handled

    pool.join()
def _computeAsymptoticCovarianceMatrix(self, W, N_k, method=None):
    """Compute estimate of the asymptotic covariance matrix.

    Parameters
    ----------
    W : np.ndarray, shape=(N, K), dtype='float'
        The normalized weight matrix for snapshots and states.
        W[n, k] is the weight of snapshot n in state k.
    N_k : np.ndarray, shape=(K), dtype='int'
        N_k[k] is the number of samples from state k.
    method : string, optional, default=None
        Method used to compute the asymptotic covariance matrix.
        Must be either "approximate", "svd", or "svd-ew".  If None,
        defaults to "svd-ew".

    Returns
    -------
    Theta : np.ndarray, shape=(K, K), dtype='float'
        Asymptotic covariance matrix.

    Notes
    -----
    The computational costs of the various 'method' arguments vary:

    * 'svd' computes the generalized inverse using the singular value
      decomposition -- this should be efficient yet accurate (faster)
    * 'svd-ew' is the same as 'svd', but uses the eigenvalue decomposition
      of W'W to bypass the need to perform an SVD (fastest)
    * 'approximate' only requires multiplication of KxN and NxK matrices,
      but is an approximate underestimate of the uncertainty

    svd and svd-ew are described in appendix D of Shirts, 2007 JCP, while
    "approximate" is in Section 4 of Kong, 2003. J. R. Statist. Soc. B.
    We currently recommend 'svd-ew'.
    """
    # Set 'svd-ew' as default if uncertainty method specified as None.
    if method == None:
        method = 'svd-ew'
    # Get dimensions of weight matrix.
    [N, K] = W.shape
    # Check dimensions
    if (K != N_k.size):
        raise ParameterError('W must be NxK, where N_k is a K-dimensional array.')
    if (np.sum(N_k) != N):
        raise ParameterError('W must be NxK, where N = sum_k N_k.')
    # Verify the weight columns are properly normalized before proceeding.
    check_w_normalized(W, N_k)
    # Compute estimate of asymptotic covariance matrix using specified method.
    if method == 'approximate':
        # Use fast approximate expression from Kong et al. -- this
        # underestimates the true covariance, but may be a good approximation
        # in some cases and requires no matrix inversions.
        # Theta = P'P
        # Construct matrices
        W = np.matrix(W, dtype=np.float64)
        # Compute covariance
        Theta = W.T * W
    elif method == 'svd':
        # Use singular value decomposition based approach given in
        # supplementary material to efficiently compute uncertainty.
        # See Appendix D.1, Eq. D4 in [1].
        # Construct matrices
        Ndiag = np.matrix(np.diag(N_k), dtype=np.float64)
        W = np.matrix(W, dtype=np.float64)
        I = np.identity(K, dtype=np.float64)
        # Compute SVD of W.  full_matrices=False avoids O(N^2) memory
        # allocation by only calculating the active subspace of U.
        [U, S, Vt] = linalg.svd(W, full_matrices=False)
        Sigma = np.matrix(np.diag(S))
        V = np.matrix(Vt).T
        # Compute covariance
        Theta = V * Sigma * self._pseudoinverse(I - Sigma * V.T * Ndiag * V * Sigma) * Sigma * V.T
    elif method == 'svd-ew':
        # Same approach as 'svd', but the eigenvalue decomposition of W'W is
        # used to forego computing the SVD.
        # See Appendix D.1, Eqs. D4 and D5 of [1].
        # Construct matrices
        Ndiag = np.matrix(np.diag(N_k), dtype=np.float64)
        W = np.matrix(W, dtype=np.float64)
        I = np.identity(K, dtype=np.float64)
        # Compute singular values and right singular vectors of W without
        # using SVD: instead, compute eigenvalues/eigenvectors of W'W.
        # Note W'W = (U S V')' (U S V') = V S'U'U S V' = V (S'S) V'
        [S2, V] = linalg.eigh(W.T * W)
        # Set any slightly negative eigenvalues (numerical noise) to zero.
        S2[np.where(S2 < 0.0)] = 0.0
        # Form matrix of singular values Sigma, and V.
        Sigma = np.matrix(np.diag(np.sqrt(S2)))
        V = np.matrix(V)
        # Compute covariance
        Theta = V * Sigma * self._pseudoinverse(I - Sigma * V.T * Ndiag * V * Sigma) * Sigma * V.T
    else:
        # Unknown method: raise an exception.
        raise ParameterError('Method ' + method + ' unrecognized.')
    return Theta
def singleton(*args, **kwargs):
    '''A lazy-init singleton decorator factory.

    usage:
    ```py
    @singleton()
    class X: ...
    ```
    `args` and `kwargs` will be passed to the ctor of `X` as args.
    '''
    def decorator(cls: type) -> Callable[[], object]:
        # Refuse stacked singletons: if cls's metaclass already derives from
        # _SingletonMetaClassBase, cls (or a base) was decorated before.
        if issubclass(type(cls), _SingletonMetaClassBase):
            raise TypeError('cannot inherit from another singleton class.')
        box = _Box()
        factory = None  # assigned below; captured by metaclass_call's closure
        lock = Lock()
        def metaclass_call(_):
            # Double-checked locking: only the first caller constructs the instance.
            if box.value is None:
                with lock:
                    if box.value is None:
                        instance = cls(*args, **kwargs)
                        # Rebrand the instance so its type is the factory subclass.
                        instance.__class__ = factory
                        box.value = (instance, )
                        # use tuple to handle `cls()` return `None`
            return box.value[0]
        def _is_init(*_):
            # True once the singleton instance has been created.
            return box.value is not None
        SingletonMetaClass = type('SingletonMetaClass', (type(cls), _SingletonMetaClassBase), {'__slots__': (), '__call__': metaclass_call})
        factory = SingletonMetaClass(cls.__name__, (cls, ), {'__slots__': (), '_is_init': _is_init})
        # Preserve the wrapped class's metadata (__name__, __doc__, ...).
        return update_wrapper(factory, cls, updated=())
    return decorator
def _send_message ( self , method , endpoint , params = None , data = None ) :
"""Send API request .
Args :
method ( str ) : HTTP method ( get , post , delete , etc . )
endpoint ( str ) : Endpoint ( to be added to base URL )
params ( Optional [ dict ] ) : HTTP request parameters
data ( Optional [ str ] ) : JSON - encoded string payload for POST
Returns :
dict / list : JSON response""" | url = self . url + endpoint
r = self . session . request ( method , url , params = params , data = data , auth = self . auth , timeout = 30 )
return r . json ( ) |
def to_file_async(self, destination, format='csv', csv_delimiter=',', csv_header=True):
    """Start saving the results to a local file in CSV format and return a Job for completion.

    Args:
        destination: path on the local filesystem for the saved results.
        format: the format to use for the exported data; currently only 'csv' is supported.
        csv_delimiter: for CSV exports, the field delimiter to use. Defaults to ','
        csv_header: for CSV exports, whether to include an initial header line. Default true.
    Returns:
        A Job for the async save operation.
    Raises:
        An Exception if the operation failed.
    """
    # NOTE(review): despite the name and docstring, this delegates to the
    # synchronous to_file() and returns None rather than a Job -- confirm
    # whether an async Job wrapper was intended here.
    self.to_file(destination, format=format, csv_delimiter=csv_delimiter, csv_header=csv_header)
def list_subdirs(self, container, marker=None, limit=None, prefix=None, delimiter=None, full_listing=False):
    """Return a list of StorageObjects representing the pseudo-subdirectories
    in the specified container.

    The marker and limit parameters handle pagination, and prefix filters the
    returned objects. The 'delimiter' parameter is ignored; "/" is always used
    since it is the only meaningful value for subdirectory listings.
    """
    lister = container.list_all if full_listing else container.list
    raw = lister(marker=marker, limit=limit, prefix=prefix,
                 delimiter="/", return_raw=True)
    manager = container.object_manager
    # Only entries flagged as "subdir" represent pseudo-subdirectories.
    return [StorageObject(manager, entry) for entry in raw if "subdir" in entry]
def unwrap_or_else(self, callback: Callable[[], U]) -> Union[T, U]:
    """Return the contained value or compute one from ``callback``.

    Args:
        callback: The default callback.

    Returns:
        The contained value if the :py:class:`Option` is ``Some``,
        otherwise ``callback()``.

    Examples:
        >>> Some(0).unwrap_or_else(lambda: 111)
        0
        >>> NONE.unwrap_or_else(lambda: 'ha')
        'ha'
    """
    if self._is_some:
        return self._val
    return callback()
def trim_shared_prefix(ref, alt):
    """Trim the prefix shared between reference and alternate strings.

    Mutations are sometimes given with a shared prefix between the reference
    and alternate strings, e.g. C>CT (nucleotides) or GYFP>G (amino acids).
    Returns the disjoint ref and alt suffixes along with the shared prefix.
    """
    limit = min(len(ref), len(alt))
    split = 0
    while split < limit and ref[split] == alt[split]:
        split += 1
    # ref and alt agree on every character before `split`, so the prefix may
    # be taken from either string
    return ref[split:], alt[split:], ref[:split]
def get_all_file_report_pages(self, query):
    """Get File Report (all pages).

    Follows the 'next_page' continuation token until the search is exhausted.
    (The page-token extraction was previously duplicated before and inside
    the loop; it is factored into a helper here.)

    :param query: a VirusTotal Intelligence search string in accordance with
        the file search documentation.
    :return: dict with key 'results' holding all JSON responses appended together.
    """
    def _next_page(response):
        # Extract the continuation token, if present.
        return response.get('results', {}).get('next_page')

    responses = []
    r = self.get_hashes_from_search(query)
    responses.append(r)
    next_page = _next_page(r)
    while next_page:
        r = self.get_hashes_from_search(query, next_page)
        next_page = _next_page(r)
        responses.append(r)
    return dict(results=responses)
def argmin(iterable, key=None, both=False):
    """Return the index of the smallest item in `iterable` (see `argmax`).

    :param iterable: items (or scores when `key` is None) to compare
    :param key: optional scoring function applied to each item
    :param both: if True, return ``(index, score)`` instead of just the index
    :return: index of the minimum score; ties resolve to the lowest index
    """
    # Python 3 fix: itertools.imap/izip no longer exist (and `imap` collided
    # with the module-level imap() helper); builtin map/zip/min are equivalent.
    if key is not None:
        it = map(key, iterable)
    else:
        it = iter(iterable)
    # min over (score, index) pairs: ties break toward the smallest index.
    score, idx = min(zip(it, count()))
    if both:
        return idx, score
    return idx
def get_elastic_items_search(elastic, search_after=None, size=None):
    """Get items from the index using search_after scrolling.

    :param elastic: connection object exposing ``index_url``
    :param search_after: optional [timestamp, uuid] pair from the previous page
    :param size: page size (defaults to DEFAULT_LIMIT)
    :return: the decoded JSON response, or None if the reply was not JSON
    """
    import json

    if not size:
        size = DEFAULT_LIMIT

    url = elastic.index_url + "/_search"

    # Build the payload as a dict and serialize with json.dumps instead of
    # string interpolation, which would produce invalid JSON for values
    # containing quotes or backslashes.
    query = {
        "size": size,
        "query": {"bool": {"must": []}},
        "sort": [
            {"metadata__timestamp": "asc"},
            {"uuid": "asc"},
        ],
    }
    if search_after:
        logging.debug("Search after: %s", search_after)
        # continuation cursor: [timestamp, uuid]
        query["search_after"] = [int(search_after[0]), str(search_after[1])]

    res = requests.post(url, data=json.dumps(query))
    rjson = None
    try:
        rjson = res.json()
    except Exception:
        logging.error("No JSON found in %s", res.text)
        logging.error("No results found from %s", url)
    return rjson
def importRemoteSNPs(name):
    """Import a SNP set available from http://pygeno.iric.ca (might work).

    :param name: name under which the SNP set will be imported
    :raises AttributeError: if the remote datawrap listing has no SNP entry
    """
    try:
        dw = listRemoteDatawraps()["Flat"]["SNPs"]
    except (KeyError, AttributeError):
        # BUG FIX: dict subscripting raises KeyError, not AttributeError, so
        # the original `except AttributeError` never caught a missing entry.
        raise AttributeError("There's no remote genome datawrap by the name of: '%s'" % name)
    finalFile = _DW(name, dw["url"])
    PS.importSNPs(finalFile)
def open_image(fname_or_instance: Union[str, IO[bytes]]):
    """Open an image and return it.

    :param fname_or_instance: either the image's location (a path string /
        file object) or an already-open ``Image.Image`` instance, which is
        returned unchanged.
    """
    already_open = isinstance(fname_or_instance, Image.Image)
    return fname_or_instance if already_open else Image.open(fname_or_instance)
def parseTextModeTimeStr(timeStr):
    """Parse the specified SMS text-mode time string.

    The time stamp format is "yy/MM/dd,hh:mm:ss±zz"
    (yy=year, MM=month, dd=day, hh=hour, mm=minute, ss=second, zz=time zone
    [Note: the unit of time zone is a quarter of an hour])

    :param timeStr: The time string to parse
    :type timeStr: str
    :return: datetime object representing the specified time string
    :rtype: datetime.datetime
    """
    stamp, zone = timeStr[:-3], timeStr[-3:]
    # zone is expressed in quarter hours; truncated to whole hours
    offsetHours = int(int(zone) * 0.25)
    parsed = datetime.strptime(stamp, '%y/%m/%d,%H:%M:%S')
    return parsed.replace(tzinfo=SimpleOffsetTzInfo(offsetHours))
def read(self):
    """Read the GPFile's contents and return them as a UTF-8 decoded string
    (empty content yields None)."""
    with closing(self.open()) as handle:
        raw = handle.read()
    return raw.decode("utf-8") or None
def is_conflicting(self):
    """Return True if the installed version conflicts with the required one."""
    # an unknown installed version is also considered conflicting
    if self.installed_version == self.UNKNOWN_VERSION:
        return True
    spec = self.version_spec or ''
    requirement = pkg_resources.Requirement.parse(
        '{0}{1}'.format(self.project_name, spec))
    return self.installed_version not in requirement
def add_entry(self, net_type, cn, addresses):
    """Add a request to the batch.

    :param net_type: str network space name the request is for (accepted for
        interface compatibility; not stored in the entry)
    :param cn: str Canonical Name for certificate
    :param addresses: list of addresses to be used as SANs
    """
    entry = {'cn': cn, 'addresses': addresses}
    self.entries.append(entry)
def client_factory(self):
    """Custom client factory that honours proxy environment settings."""
    url = self.production_url if self._service.production else self.testing_url
    proxy_options = {}
    # Uppercase PAYEX_* variables take precedence over the conventional
    # lowercase proxy variables; empty values fall through.
    for scheme, env_vars in (('https', ('PAYEX_HTTPS_PROXY', 'https_proxy')),
                             ('http', ('PAYEX_HTTP_PROXY', 'http_proxy'))):
        for var in env_vars:
            setting = os.environ.get(var)
            if setting:
                proxy_options[scheme] = setting
                break
    return client.Client(url, proxy=proxy_options)
def all(self, count=500, offset=0, type=None, inactive=None, emailFilter=None,
        tag=None, messageID=None, fromdate=None, todate=None, ):
    """Return many bounces.

    :param int count: Number of bounces to return per request.
    :param int offset: Number of bounces to skip.
    :param str type: Filter by type of bounce.
    :param bool inactive: Filter by emails that were deactivated by Postmark due to the bounce.
    :param str emailFilter: Filter by email address.
    :param str tag: Filter by tag.
    :param str messageID: Filter by messageID.
    :param date fromdate: Filter messages starting from the date specified (inclusive).
    :param date todate: Filter messages up to the date specified (inclusive).
    :return: A list of :py:class:`Bounce` instances.
    :rtype: `list`
    """
    filters = dict(count=count, offset=offset, type=type, inactive=inactive,
                   emailFilter=emailFilter, tag=tag, messageID=messageID,
                   fromdate=fromdate, todate=todate)
    responses = self.call_many("GET", "/bounces/", **filters)
    return self.expand_responses(responses, "Bounces")
def delete_fastqs(job, fastqs):
    """Delete the input fastqs from the job store once their purpose has been
    achieved (i.e. after all mapping steps).

    ARGUMENTS
    1. fastqs: Dict of list of input fastqs
         fastqs
          +- 'tumor_rna':  [<JSid for 1.fastq>, <JSid for 2.fastq>]
          +- 'tumor_dna':  [<JSid for 1.fastq>, <JSid for 2.fastq>]
          +- 'normal_dna': [<JSid for 1.fastq>, <JSid for 2.fastq>]
    """
    for sample in ('tumor_rna', 'tumor_dna', 'normal_dna'):
        # each sample is a pair of fastq files
        for mate in xrange(0, 2):
            job.fileStore.deleteGlobalFile(fastqs[sample][mate])
    return None
def histogram(n_traces=1, n=500, dispersion=2, mode=None):
    """Return a DataFrame with the required format for a histogram plot.

    Parameters:
        n_traces : int
            Number of traces
        n : int
            Number of points for each trace
        dispersion : int
            Spread of the random per-trace offset
        mode : string
            Format for each item:
            'abc' for alphabet columns
            'stocks' for random stock names
    """
    columns = getName(n_traces, mode=mode)
    # one column per trace: normal noise plus a random integer offset
    traces = [np.random.randn(n) + np.random.randint(-1 * dispersion, dispersion)
              for _ in range(n_traces)]
    return pd.DataFrame(np.transpose(traces), columns=columns)
def migrate(vm_, target, live=1, port=0, node=-1, ssl=None, change_home_server=0):
    '''Migrates the virtual machine to another hypervisor

    CLI Example:

    .. code-block:: bash

        salt '*' virt.migrate <vm name> <target hypervisor> [live] [port] [node] [ssl] [change_home_server]

    Optional values:

    live
        Use live migration
    port
        Use a specified port
    node
        Use specified NUMA node on target
    ssl
        use ssl connection for migration
    change_home_server
        change home server for managed domains
    '''
    with _get_xapi_session() as xapi:
        vm_uuid = _get_label_uuid(xapi, 'VM', vm_)
        if vm_uuid is False:
            return False
        options = {
            'port': port,
            'node': node,
            'ssl': ssl,
            'change_home_server': change_home_server,
        }
        try:
            xapi.VM.migrate(vm_uuid, target, bool(live), options)
        except Exception:
            # any xapi failure is reported as an unsuccessful migration
            return False
        return True
def _nodes(output, parent=None):
    """Yield nodes from entities.

    Recursively expands Collection members depth-first, yielding each member's
    nodes and then the collection output itself; non-collection outputs are
    yielded as-is.

    :param output: an output object, or anything carrying an `entity` attribute
    :param parent: optional parent attached (via attr.evolve) to direct members
    """
    # NOTE refactor so all outputs behave the same
    # Fall back to the object itself when it has no `entity` attribute.
    entity = getattr(output, 'entity', output)
    if isinstance(entity, Collection):
        for member in entity.members:
            if parent is not None:
                member = attr.evolve(member, parent=parent)
            # NOTE(review): the recursive call does not propagate a parent, so
            # members of nested collections keep parent=None -- confirm intended.
            yield from _nodes(member)
        yield output
    else:
        yield output
def set_params(self, **params):
    """Set parameters on this object.

    Safe setter method - attributes should not be modified directly as some
    changes are not valid.

    Valid parameters:
    - n_landmark
    - n_svd

    Parameters
    ----------
    params : key-value pairs of parameter name and new values

    Returns
    -------
    self
    """
    # update parameters, tracking whether landmarks need recomputation
    needs_landmark_reset = False
    for attr_name in ('n_landmark', 'n_svd'):
        if attr_name in params and params[attr_name] != getattr(self, attr_name):
            setattr(self, attr_name, params[attr_name])
            needs_landmark_reset = True
    # update superclass parameters
    super().set_params(**params)
    # reset things that changed
    if needs_landmark_reset:
        self._reset_landmarks()
    return self
def _recursive_url_find(self, item, image_list):
    """Recursively traverses a dictionary-like data structure for Khan Academy
    assessment items in order to search for image links in `url` data attributes,
    and if it finds any it adds them to `image_list` and rewrites the `url` attribute.

    Use cases:
      - `backgroundImage.url` attributes for graphs and images

    Args:
        item (dict): KA assessment item; will be modified in place
        image_list (list): image files (File objects) found during the traversal

    Returns: None
    """
    # Bind image_list once so the partial can be mapped over list elements.
    recursive_fn = partial(self._recursive_url_find, image_list=image_list)
    if isinstance(item, list):
        # list(...) forces the lazy map so the recursion actually executes.
        list(map(recursive_fn, item))
    elif isinstance(item, dict):
        if 'url' in item:
            if item['url']:
                # Rewrite the url in place; set_image returns the new url
                # plus the associated file(s).
                item['url'], image_file = self.set_image(item['url'])
                # NOTE(review): assumes set_image returns a *list* of files,
                # since it is concatenated onto image_list -- confirm.
                image_list += image_file
        for field, field_data in item.items():
            if isinstance(field_data, dict):
                self._recursive_url_find(field_data, image_list)
            elif isinstance(field_data, list):
                list(map(recursive_fn, field_data))
def UGT(a: BitVec, b: BitVec) -> Bool:
    """Create an unsigned greater-than expression.

    :param a: left operand
    :param b: right operand
    :return: a Bool expression representing ``a >u b``
    """
    comparison = _comparison_helper(
        a, b, z3.UGT, default_value=False, inputs_equal=False
    )
    return comparison
def argument_run(self, sp_r):
    """.. _argument_run:

    Converts arguments according to ``to_int``.

    Walks the parsed lines in `sp_r`: "data" lines get their payload expanded
    via get_words(); "command" lines are argument-checked and then converted.
    Lines of any other kind are dropped from the result.

    :param sp_r: iterable of parsed lines, indexed as (id, kind, obj, args)
    :return: list of converted lines, in input order
    """
    arg_run = []
    for line in sp_r:
        logging.debug("argument run: handling: " + str(line))
        if (line[1] == "data"):
            # line[2] owns the data; expand the raw words from line[3]
            arg_run.append((line[0], line[1], line[2], line[2].get_words(line[3])))
            continue
        if (line[1] == "command"):
            # validate argument count/shape before converting; may raise
            self.checkargs(line[0], line[2], line[3])
            arg_run.append((line[0], line[1], line[2], [a for a in self.convert_args(line[2], line[3])]))
    return arg_run
async def fetchall(self):
    """Fetch all remaining rows, as per MySQLdb. Pretty useless for large
    queries, as it is buffered."""
    rows = []
    row = await self.fetchone()
    while row is not None:
        rows.append(row)
        row = await self.fetchone()
    return rows
def check_dependencies(req, indent=1, history=None):
    """Given a setuptools package requirement (e.g. 'gryphon==2.42' or just
    'gryphon'), print a tree of dependencies as they resolve in this
    environment."""
    # keep a history to avoid infinite loops on dependency cycles
    history = set() if history is None else history
    if req in history:
        return
    history.add(req)
    dist = pkg_resources.get_distribution(req)
    extras = parse_extras(req)
    if indent == 1:
        # print the root requirement once, at depth 0
        print_package(req, 0)
    for dependency in dist.requires(extras=extras):
        print_package(dependency, indent)
        check_dependencies(dependency, indent + 1, history)
def on_number(self, ctx, value):
    '''Since this is defined, both the integer and double callbacks are unused.'''
    # integers parse as int, everything else as float
    number = int(value) if value.isdigit() else float(value)
    current = self._stack[-1]
    if current is JSONCompositeType.OBJECT:
        self.fire(JSONStreamer.VALUE_EVENT, number)
    elif current is JSONCompositeType.ARRAY:
        self.fire(JSONStreamer.ELEMENT_EVENT, number)
    else:
        raise RuntimeError('Invalid json-streamer state')
def to_split(val, split_on_comma=True):
    """Try to split a string with comma separator.

    If val is already a list return it.
    If we don't have to split just return [val].
    If split gives only [''] empty it.

    :param val: value to split
    :param split_on_comma: whether to split on commas
    :type split_on_comma: bool
    :return: split value on comma
    :rtype: list

    >>> to_split('a,b,c')
    ['a', 'b', 'c']

    >>> to_split('a,b,c', False)
    ['a,b,c']

    >>> to_split(['a,b,c'])
    ['a,b,c']

    >>> to_split('')
    []
    """
    if isinstance(val, list):
        return val
    if not split_on_comma:
        return [val]
    parts = val.split(',')
    # splitting an empty string yields [''] -- normalize that to []
    return [] if parts == [''] else parts
def get_config(self, key, default=None):
    '''Look up a config field and return its value, first checking
    route.config, then route.app.config.

    :param key: name of the config field to look up
    :param default: value returned when the key is in neither config
    '''
    # BUG FIX: was `self.app.conifg` (typo), which raised AttributeError
    # whenever the key was absent from the route-level config.
    for conf in (self.config, self.app.config):
        if key in conf:
            return conf[key]
    return default
def register_method(self, func):
    """Register a function to be available as an RPC method.

    The given function is inspected for the external_name, protocol and
    entry_point values set by the @rpc_method decorator.

    :param func: A function previously decorated using @rpc_method
    :return: The name of the registered method
    :raises ImproperlyConfigured: if func is not decorated, uses a reserved
        "rpc." name, or a *different* method is already registered under
        the same name
    """
    if not getattr(func, 'modernrpc_enabled', False):
        raise ImproperlyConfigured('Error: trying to register {} as RPC method, but it has not been decorated.'.format(func.__name__))
    # Define the external name of the function
    name = getattr(func, 'modernrpc_name', func.__name__)
    logger.debug('Register RPC method "{}"'.format(name))
    if name.startswith('rpc.'):
        raise ImproperlyConfigured('According to RPC standard, method names starting with "rpc." are reserved for ' 'system extensions and must not be used. See ' 'http://www.jsonrpc.org/specification#extensions for more information.')
    # Encapsulate the function in a RPCMethod object
    method = RPCMethod(func)
    # Ensure method names are unique in the registry
    existing_method = self.get_method(method.name, ALL, ALL)
    if existing_method is not None:
        # Trying to register many times the same function is OK, because if a method is decorated
        # with @rpc_method(), it could be imported in different places of the code
        if method == existing_method:
            return method.name
        # But if we try to use the same name to register 2 different methods, we
        # must inform the developer there is an error in the code
        else:
            raise ImproperlyConfigured("A RPC method with name {} has already been registered".format(method.name))
    # Store the method
    self._registry[method.name] = method
    logger.debug('Method registered. len(registry): {}'.format(len(self._registry)))
    return method.name
def stop(self, timeout=None):
    """Stop the producer (async mode). Blocks until the async thread completes.

    :param timeout: deprecated and ignored; kept for backwards compatibility
    """
    if timeout is not None:
        log.warning('timeout argument to stop() is deprecated - ' 'it will be removed in future release')
    if not self.async_send:
        log.warning('producer.stop() called, but producer is not async')
        return
    if self.stopped:
        log.warning('producer.stop() called, but producer is already stopped')
        return
    if self.async_send:
        # Wake the worker with a stop sentinel, then wait for the thread to exit.
        self.queue.put((STOP_ASYNC_PRODUCER, None, None))
        self.thread_stop_event.set()
        self.thread.join()
    if hasattr(self, '_cleanup_func'):
        # Remove cleanup handler now that we've stopped
        # py3 supports unregistering
        if hasattr(atexit, 'unregister'):
            atexit.unregister(self._cleanup_func)  # pylint: disable=no-member
        # py2 requires removing from private attribute...
        else:
            # ValueError on list.remove() if the exithandler no longer exists
            # but that is fine here
            try:
                atexit._exithandlers.remove(  # pylint: disable=no-member
                    (self._cleanup_func, (self, ), {}))
            except ValueError:
                pass
        del self._cleanup_func
    self.stopped = True
async def reboot(self):
    """Reboot the device via the local API's /setup/reboot endpoint.

    :return: True when the device answered HTTP 200, False on any
        connection error or timeout.
    """
    endpoint = '/setup/reboot'
    url = API.format(ip=self._ipaddress, endpoint=endpoint)
    data = {'params': 'now'}
    returnvalue = False
    try:
        # overall 5 second budget for the reboot request
        async with async_timeout.timeout(5, loop=self._loop):
            result = await self._session.post(url, json=data, headers=HEADERS)
            if result.status == 200:
                returnvalue = True
    except (asyncio.TimeoutError, aiohttp.ClientError, socket.gaierror) as error:
        _LOGGER.error('Error connecting to GHLocalApi, %s', error)
    return returnvalue
def auto_start_vm(self):
    """Auto start the GNS3 VM if enabled in the settings.

    Old-style (``yield from``) coroutine. When the VM fails to start, the
    error is logged and the VM is still registered as a compute so the
    user sees the failure when they try to use a node.
    """
    if self.enable:
        try:
            yield from self.start()
        except GNS3VMError as e:
            # User will receive the error later when they will try to use the node
            try:
                yield from self._controller.add_compute(compute_id="vm", name="GNS3 VM ({})".format(self.current_engine().vmname), host=None, force=True)
            except aiohttp.web.HTTPConflict:
                # the "vm" compute is already registered; nothing to do
                pass
            log.error("Can't start the GNS3 VM: %s", str(e))
def get_file(self, file_id):
    """Use this method to get basic info about a file and prepare it for downloading.

    For the moment, bots can download files of up to 20MB in size. On success,
    a File object is returned. The file can then be downloaded via the link
    https://api.telegram.org/file/bot<token>/<file_path>, where <file_path> is
    taken from the response. It is guaranteed that the link will be valid for
    at least 1 hour. When the link expires, a new one can be requested by
    calling getFile again.

    Note: This function may not preserve the original file name and MIME type.
    You should save the file's MIME type and name (if available) when the File
    object is received.

    https://core.telegram.org/bots/api#getfile

    Parameters:
        :param file_id: File identifier to get info about
        :type file_id: str|unicode

    Returns:
        :return: On success, a File object is returned
        :rtype: pytgbot.api_types.receivable.media.File
    """
    # Reject non-string file ids early with a descriptive error.
    assert_type_or_raise(file_id, unicode_type, parameter_name="file_id")
    result = self.do("getFile", file_id=file_id)
    if self.return_python_objects:
        logger.debug("Trying to parse {data}".format(data=repr(result)))
        from pytgbot.api_types.receivable.media import File
        try:
            return File.from_array(result)
        except TgApiParseException:
            logger.debug("Failed parsing as api_type File", exc_info=True)
        # end try
        # no valid parsing so far
        raise TgApiParseException("Could not parse result.")
        # See debug log for details!
    # end if return_python_objects
    return result
def list_and_add(a, b):
    """Concatenate anything into a list.

    Args:
        a: the first thing
        b: the second thing

    Returns:
        list. All the things in a list.
    """
    def as_list(value):
        # non-list values get wrapped so concatenation always works
        return value if isinstance(value, list) else [value]

    return as_list(a) + as_list(b)
def keep_frameshifts(mut_df, indel_len_col=True):
    """Filter out all mutations that are not frameshift indels.

    Requires that one of the alleles have '-' indicating either an insertion
    or deletion depending if found in reference allele or somatic allele
    columns, respectively.

    Parameters
    ----------
    mut_df : pd.DataFrame
        mutation input file as a dataframe in standard format
    indel_len_col : bool
        whether or not to add a column indicating the length of the frameshift

    Returns
    -------
    mut_df : pd.DataFrame
        mutations with only frameshift mutations kept
    """
    # keep only the rows flagged as frameshifts
    frameshift_mask = is_frameshift_annotation(mut_df)
    mut_df = mut_df[frameshift_mask]
    if indel_len_col:
        # record the length of each frameshift
        mut_df.loc[:, 'indel len'] = compute_indel_length(mut_df)
    return mut_df
def shift_or_mirror_into_invertible_domain(self, solution_genotype, copy=False):
    """Details: input ``solution_genotype`` is changed. The domain is
    [lb - al, ub + au] and in [lb - 2*al - (ub - lb) / 2, lb - al]
    mirroring is applied.

    :param solution_genotype: sequence of coordinates; modified in place
        unless ``copy`` is True
    :param copy: when True, operate on (and return) a copy instead
    :return: the shifted/mirrored genotype
    """
    assert solution_genotype is not None
    if copy:
        y = [val for val in solution_genotype]
    else:
        y = solution_genotype
    if isinstance(y, np.ndarray) and not isinstance(y[0], float):
        # ensure float arithmetic below (e.g. for integer-typed arrays)
        y = array(y, dtype=float)
    for i in rglen(y):
        lb = self._lb[self._index(i)]
        ub = self._ub[self._index(i)]
        al = self._al[self._index(i)]
        au = self._au[self._index(i)]
        # x is far from the boundary, compared to ub - lb
        if y[i] < lb - 2 * al - (ub - lb) / 2.0 or y[i] > ub + 2 * au + (ub - lb) / 2.0:
            r = 2 * (ub - lb + al + au)  # period
            s = lb - 2 * al - (ub - lb) / 2.0  # start
            y[i] -= r * ((y[i] - s) // r)  # shift
        if y[i] > ub + au:
            # mirror back below the upper edge
            y[i] -= 2 * (y[i] - ub - au)
        if y[i] < lb - al:
            # mirror back above the lower edge
            y[i] += 2 * (lb - al - y[i])
    return y
def vi_pos_matching(line, index=0):
    '''find matching <([{}])>'''
    try:
        # phase 1: scan right from `index` until we hit a bracket character
        while line[index] not in _vi_dct_matching:
            index += 1
        anchor = line[index]
        # the table yields the matching character and the scan direction
        target, step = _vi_dct_matching[anchor]
        depth = 1
        # phase 2: walk toward the target, tracking nesting depth
        while True:
            index += step
            if index < 0:
                return -1
            ch = line[index]
            if ch == anchor:
                depth += 1
            elif ch == target:
                depth -= 1
                if depth == 0:
                    return index
    except IndexError:
        # ran off the end of the line in either phase
        return -1
def encode(self, text, encoding, defaultchar='?'):
    """Encode text under the given codepage encoding.

    :param text: Text to encode
    :param encoding: Encoding name to use (must be defined in capabilities)
    :param defaultchar: Fallback for characters the codepage cannot encode
    :return: the encoded ``bytes``
    """
    char_map = self._get_codepage_char_map(encoding)
    encoded_values = [self._encode_char(ch, char_map, defaultchar) for ch in text]
    return bytes(encoded_values)
def _do_bgread(stream, blockSizeLimit, pollTime, closeStream, results):
    '''_do_bgread - Worker function for the background read thread.

    Polls the stream with non-blocking reads until it is closed,
    appending each non-empty chunk to ``results``.

    @param stream <object> - Stream to read until closed
    @param blockSizeLimit - Maximum bytes per read
    @param pollTime - Seconds to sleep between polls
    @param closeStream - Whether to close the stream when reading ends
    @param results <BackgroundReadData> - Accumulator for blocks/errors
    '''
    # The whole loop lives inside one try (not just the read call) for
    # performance reasons.
    try:
        while True:
            chunk = nonblock_read(stream, limit=blockSizeLimit)
            if chunk is None:
                # stream was closed on the other end
                break
            if chunk:
                results.addBlock(chunk)
            time.sleep(pollTime)
    except Exception as readError:
        # NOTE(review): on error, isFinished is deliberately left unset
        # and the stream is not closed -- matches prior behaviour.
        results.error = readError
        return
    if closeStream and hasattr(stream, 'close'):
        stream.close()
    results.isFinished = True
def citedReferencesRetrieve(self, queryId, count=100, offset=1, retrieveParameters=None):
    """Submit a retrieval query for a previous citedReferences operation.

    Useful for paging past the 100-records-per-query retrieval limit: a
    citedReferences call may report e.g. 106 records found but return only
    1-100; a follow-up citedReferencesRetrieve fetches records 101-106.

    :queryId: The query ID from a previous citedReferences operation
    :count: Number of records to return (0..100; 0 returns only summary
        information)
    :offset: First record in results to return; must be greater than zero
    :retrieveParameters: Retrieve parameters; when falsy, the result of
        ``make_retrieveParameters(offset, count)`` is used instead
    """
    # note: `or`, not `is None` -- any falsy value triggers the default
    params = retrieveParameters or self.make_retrieveParameters(offset, count)
    return self._search.service.citedReferencesRetrieve(
        queryId=queryId,
        retrieveParameters=params,
    )
def parse_reports(self):
    """Find Picard RrbsSummaryMetrics reports, parse their data, and build
    the general-stats columns plus the converted-bases and read-count bar
    plots.

    Returns the number of samples found so the parent Picard module can
    report totals.
    """
    # Set up vars
    self.picard_rrbs_metrics = dict()
    # Go through logs and find Metrics
    for f in self.find_log_files('picard/rrbs_metrics', filehandles=True):
        parsed_data = dict()
        s_name = None
        keys = None
        for l in f['f']:
            # New log starting
            if 'CollectRrbsMetrics' in l and 'INPUT' in l:
                s_name = None
                keys = None
                # Pull sample name from input
                fn_search = re.search(r"INPUT(?:=|\s+)(\[?[^\s]+\]?)", l, flags=re.IGNORECASE)
                if fn_search:
                    s_name = os.path.basename(fn_search.group(1).strip('[]'))
                    s_name = self.clean_s_name(s_name, f['root'])
                    parsed_data[s_name] = dict()
            if s_name is not None:
                if 'RrbsSummaryMetrics' in l and '## METRICS CLASS' in l:
                    # next line holds the tab-separated column headers
                    keys = f['f'].readline().strip("\n").split("\t")
                elif keys:
                    vals = l.strip("\n").split("\t")
                    if len(vals) == len(keys):
                        for i, k in enumerate(keys):
                            # store numbers as floats, anything else verbatim
                            try:
                                parsed_data[s_name][k] = float(vals[i])
                            except ValueError:
                                parsed_data[s_name][k] = vals[i]
                    else:
                        # column count mismatch: metrics table has ended
                        s_name = None
                        keys = None
        # Remove empty dictionaries
        for s_name in list(parsed_data.keys()):
            if len(parsed_data[s_name]) == 0:
                parsed_data.pop(s_name, None)
        # Collect parsed data
        for s_name in parsed_data.keys():
            if s_name in self.picard_rrbs_metrics:
                log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
            self.add_data_source(f, s_name, section='RrbsSummaryMetrics')
            self.picard_rrbs_metrics[s_name] = parsed_data[s_name]
    # Filter to strip out ignored sample names
    self.picard_rrbs_metrics = self.ignore_samples(self.picard_rrbs_metrics)
    if len(self.picard_rrbs_metrics) > 0:
        # Write parsed data to a file
        self.write_data_file(self.picard_rrbs_metrics, 'multiqc_picard_RrbsSummaryMetrics')
        # Add to general stats table
        # (Picard reports conversion rate; invert to show % methylated)
        self.general_stats_headers['PCT_CPG_BASES_CONVERTED'] = {
            'title': '% CpG Methylated',
            'description': 'Percentage of times a CpG cytosine was converted',
            'max': 100, 'min': 0, 'suffix': '%', 'format': '{:,.0f}',
            'scale': 'RdYlGn-rev',
            'modify': lambda x: 100 - self.multiply_hundred(x)
        }
        self.general_stats_headers['PCT_NON_CPG_BASES_CONVERTED'] = {
            'title': '% Non-CpG Methylated',
            'description': 'Percentage of times a non-CpG cytosine was converted',
            'max': 100, 'min': 0, 'suffix': '%', 'format': '{:,.0f}',
            'scale': 'RdYlGn',
            'modify': lambda x: 100 - self.multiply_hundred(x)
        }
        self.general_stats_headers['MEDIAN_CPG_COVERAGE'] = {
            'title': 'Median CpG Cov',
            'description': 'Median coverage of CpG sites',
            'min': 0
        }
        for s_name in self.picard_rrbs_metrics:
            if s_name not in self.general_stats_data:
                self.general_stats_data[s_name] = dict()
            self.general_stats_data[s_name].update(self.picard_rrbs_metrics[s_name])
        # Make the bar plot of converted bases
        pdata_cpg = dict()
        pdata_noncpg = dict()
        for s_name in self.picard_rrbs_metrics.keys():
            pdata_cpg[s_name] = dict()
            pdata_cpg[s_name]['converted'] = self.picard_rrbs_metrics[s_name]['CPG_BASES_CONVERTED']
            pdata_cpg[s_name]['not_converted'] = self.picard_rrbs_metrics[s_name]['CPG_BASES_SEEN'] - self.picard_rrbs_metrics[s_name]['CPG_BASES_CONVERTED']
            pdata_noncpg[s_name] = dict()
            # NOTE(review): 'converted' uses NON_CPG_BASES (the total), not
            # NON_CPG_CONVERTED_BASES -- looks inconsistent with the CpG
            # branch above; confirm against Picard's metric definitions.
            pdata_noncpg[s_name]['converted'] = self.picard_rrbs_metrics[s_name]['NON_CPG_BASES']
            pdata_noncpg[s_name]['not_converted'] = self.picard_rrbs_metrics[s_name]['NON_CPG_BASES'] - self.picard_rrbs_metrics[s_name]['NON_CPG_CONVERTED_BASES']
        keys = OrderedDict()
        keys['not_converted'] = {'name': 'Unconverted Bases (Methylated)'}
        keys['converted'] = {'name': 'Converted Bases (Unmethylated)'}
        # Config for the plot
        pconfig = {
            'id': 'picard_rrbs_converted_bases_plot',
            'title': 'Picard: Converted Bases',
            'ylab': '# CpG Bases',
            'cpswitch_counts_label': 'Number of Bases',
            'data_labels': [
                {'name': 'CpG', 'ylab': '# CpG Bases'},
                {'name': 'Non-CpG', 'ylab': '# Non-CpG Bases'}
            ]
        }
        self.add_section(name='RRBS Converted Bases', anchor='picard-rrbssummary-convertedbases', plot=bargraph.plot([pdata_cpg, pdata_noncpg], [keys, keys], pconfig))
        # Make the bar plot of processed reads
        pdata = dict()
        for s_name in self.picard_rrbs_metrics.keys():
            pdata[s_name] = dict()
            pdata[s_name]['with_no_cpg'] = self.picard_rrbs_metrics[s_name]['READS_WITH_NO_CPG']
            pdata[s_name]['ignored_short'] = self.picard_rrbs_metrics[s_name]['READS_IGNORED_SHORT']
            pdata[s_name]['ignored_mismatches'] = self.picard_rrbs_metrics[s_name]['READS_IGNORED_MISMATCHES']
            # everything aligned that wasn't discarded for one of the above
            pdata[s_name]['not_ignored'] = (self.picard_rrbs_metrics[s_name]['READS_ALIGNED'] - pdata[s_name]['with_no_cpg'] - pdata[s_name]['ignored_short'] - pdata[s_name]['ignored_mismatches'])
        keys = OrderedDict()
        keys['not_ignored'] = {'name': 'Utilised reads'}
        keys['with_no_cpg'] = {'name': 'Ignored (no CpG sites)'}
        keys['ignored_short'] = {'name': 'Ignored (too short)'}
        keys['ignored_mismatches'] = {'name': 'Ignored (exceeded mismatch threshold)'}
        # Config for the plot
        pconfig = {
            'id': 'picard_rrbs_ignored_reads_plot',
            'title': 'Picard: RRBS Read Counts',
            'ylab': '# Reads',
            'cpswitch_counts_label': 'Number of Reads'
        }
        self.add_section(name='RRBS Read Counts', anchor='picard-rrbssummary-readcounts', plot=bargraph.plot(pdata, keys, pconfig))
    # Return the number of detected samples to the parent module
    return len(self.picard_rrbs_metrics)
def get_volume(self, channel=None):
    """Get the current sound volume by parsing ``amixer get <channel>``.

    If the channel is not specified, it tries to determine the default
    one by running ``amixer scontrols``. If that fails as well, it uses
    the ``Playback`` channel, as that is the only channel on the EV3.
    """
    if channel is None:
        channel = self._get_channel()
    amixer_output = check_output(['amixer', 'get', channel]).decode()
    match = re.search(r'\[(?P<volume>\d+)%\]', amixer_output)
    if match is None:
        raise Exception('Failed to parse output of `amixer get {}`'.format(channel))
    return int(match.group('volume'))
def prepare_sort_key(self):
    '''Create this converter's sort key.

    Triggered by view_function._sort_converters when our sort key should
    be created. This can't be called in the constructor because Django
    models might not be ready yet.
    '''
    # resolve "app.Model" strings into actual model classes first
    if isinstance(self.convert_type, str):
        try:
            app_name, model_name = self.convert_type.split('.')
        except ValueError:
            raise ImproperlyConfigured('"{}" is not a valid converter type. String-based converter types must be specified in "app.Model" format.'.format(self.convert_type))
        try:
            self.convert_type = apps.get_model(app_name, model_name)
        except LookupError as e:
            raise ImproperlyConfigured('"{}" is not a valid model name. {}'.format(self.convert_type, e))
    # we reverse sort by (len(mro), source code order) so subclasses match
    # first; on same types, last declared method sorts first
    mro_depth = len(inspect.getmro(self.convert_type))
    self.sort_key = (-1 * mro_depth, -1 * self.source_order)
def get_vmpolicy_macaddr_input_vcenter ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
get_vmpolicy_macaddr = ET . Element ( "get_vmpolicy_macaddr" )
config = get_vmpolicy_macaddr
input = ET . SubElement ( get_vmpolicy_macaddr , "input" )
vcenter = ET . SubElement ( input , "vcenter" )
vcenter . text = kwargs . pop ( 'vcenter' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def invite_other_parties(self, possible_owners):
    """Invites the next lane's (possible) owner(s) to participate.

    Fires the ``lane_user_change`` signal so registered receivers can
    notify the candidate owners.

    :param possible_owners: candidate owners of the next lane
        (presumably user objects -- TODO confirm against the signal's
        receivers)
    """
    signals.lane_user_change.send(sender=self.user, current=self,
                                  old_lane=self.old_lane,
                                  possible_owners=possible_owners)
def new_cast_status(self, status):
    """Called when the Chromecast pushes a fresh status object."""
    self.status = status
    if not status:
        return
    # signal anyone waiting for the first (truthy) status to arrive
    self.status_event.set()
def is_modified_field(self, name):
    """Return whether the named field has been modified (or deleted).

    A field counts as modified when its real name appears in the
    modified-data or deleted-fields sets, or when its current value
    reports ``is_modified()``. Any failure to fetch/inspect the value is
    treated as "not modified".
    """
    real_name = self.get_real_name(name)
    if real_name in self.__modified_data__ or real_name in self.__deleted_fields__:
        return True
    try:
        return self.get_field_value(real_name).is_modified()
    except Exception:
        # missing field or value without is_modified() -> unmodified
        return False
def notify_server_ready(self, language, config):
    """Notify language-server availability to matching open editors."""
    editors = (self.tabs.widget(i) for i in range(self.get_stack_count()))
    for editor in editors:
        # editor.language is mixed-case; `language` is expected lowercase
        if editor.language.lower() == language:
            editor.start_lsp_services(config)
def on_each(self, *targets: raw_types.Qid) -> op_tree.OP_TREE:
    """Apply this gate to each of the targets individually.

    Args:
        *targets: The qubits to apply this gate to.

    Returns:
        A list of operations, one per target qubit, in the given order.

    Raises:
        ValueError if targets are not instances of Qid.
    """
    return list(map(self.on, targets))
def setup_signing(self, secret_key, sign_outgoing=True, allow_unsigned_callback=None, initial_timestamp=None, link_id=None):
    '''setup for MAVLink2 signing

    :param secret_key: shared signing key for the MAVLink2 link
    :param sign_outgoing: whether outgoing packets are signed
    :param allow_unsigned_callback: callback deciding whether to accept
        unsigned packets (None means library default behaviour)
    :param initial_timestamp: signing timestamp in 10-microsecond units
        since 1/1/2015; computed from the wall clock when None
    :param link_id: signing link id (0-255); auto-assigned when None
    '''
    self.mav.signing.secret_key = secret_key
    self.mav.signing.sign_outgoing = sign_outgoing
    self.mav.signing.allow_unsigned_callback = allow_unsigned_callback
    if link_id is None:
        # auto-increment the link_id for each link
        # (module-level counter; saturates at 255)
        global global_link_id
        link_id = global_link_id
        global_link_id = min(global_link_id + 1, 255)
    self.mav.signing.link_id = link_id
    if initial_timestamp is None:
        # timestamp is time since 1/1/2015
        epoch_offset = 1420070400
        # clamp so the timestamp is never negative on a misset clock
        now = max(time.time(), epoch_offset)
        initial_timestamp = now - epoch_offset
        initial_timestamp = int(initial_timestamp * 100 * 1000)
        # initial_timestamp is in 10usec units
    self.mav.signing.timestamp = initial_timestamp
def parse_node(node):
    """Parse a serialized binary-trie node.

    Input: a serialized node (bytes; first byte is the node-type tag).
    Output: a (node_type, left/keypath/None, right/child/value) triple
    depending on the node type.

    Raises InvalidNode for blank, malformed, or unrecognised nodes.
    """
    if node is None or node == b'':
        raise InvalidNode("Blank node is not a valid node type in Binary Trie")
    node_type = node[0]
    if node_type == BRANCH_TYPE:
        # 1 tag byte + two 32-byte child hashes
        if len(node) != 65:
            raise InvalidNode("Invalid branch node, both child node should be 32 bytes long each")
        # Output: node type, left child, right child
        return BRANCH_TYPE, node[1:33], node[33:]
    if node_type == KV_TYPE:
        # needs at least one keypath byte plus a 32-byte child hash
        if len(node) <= 33:
            raise InvalidNode("Invalid kv node, short of key path or child node hash")
        # Output: node type, keypath, child
        return KV_TYPE, decode_to_bin_keypath(node[1:-32]), node[-32:]
    if node_type == LEAF_TYPE:
        if len(node) == 1:
            raise InvalidNode("Invalid leaf node, can not contain empty value")
        # Output: node type, None, value
        return LEAF_TYPE, None, node[1:]
    raise InvalidNode("Unable to parse node")
def calculate_offset(cls, labels):
    '''Return the maximum length of the provided strings that have a nice
    variant in DapFormatter._nice_strings.

    :param labels: iterable of label keys to look up in ``_nice_strings``
    :return: length of the longest matching nice string, or 0 when none
        of the labels has a nice variant
    '''
    used_strings = set(cls._nice_strings.keys()) & set(labels)
    # max() over an empty sequence raises ValueError; an offset of 0 is
    # the sensible result when no label has a nice variant
    return max((len(cls._nice_strings[s]) for s in used_strings), default=0)
def remove_import_statements(code):
    """Remove lines with import statements from the code.

    Args:
        code: The code to be stripped.

    Returns:
        The code without ``import``/``from`` lines, with leading and
        trailing blank lines trimmed.
    """
    kept = [line for line in code.splitlines()
            if not line.lstrip().startswith(('import ', 'from '))]
    # trim blank lines from both ends without mutating in a loop
    start = 0
    while start < len(kept) and kept[start] == '':
        start += 1
    end = len(kept)
    while end > start and kept[end - 1] == '':
        end -= 1
    return '\n'.join(kept[start:end])
def run(self, args):
    """Main entry point of script.

    Extracts translatable strings from mako templates, django/javascript
    sources and configured third-party apps, segments the resulting .po
    files, and normalises their headers/metadata. The pre-existing
    django.po/djangojs.po are saved first and restored at the end.

    :param args: parsed CLI arguments; only ``args.verbose`` is read here
    """
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)
    configuration = self.configuration
    configuration.locale_dir.parent.makedirs_p()
    # pylint: disable=attribute-defined-outside-init
    self.source_msgs_dir = configuration.source_messages_dir
    # The extraction process clobbers django.po and djangojs.po.
    # Save them so that it won't do that.
    self.rename_source_file('django.po', 'django-saved.po')
    self.rename_source_file('djangojs.po', 'djangojs-saved.po')
    # Extract strings from mako templates.
    # map our integer verbosity onto pybabel's -q/-v flags
    verbosity_map = {0: "-q", 1: "", 2: "-v", }
    babel_verbosity = verbosity_map.get(args.verbose, "")
    if args.verbose:
        stderr = None
    else:
        stderr = DEVNULL
    # --keyword informs Babel that `interpolate()` is an expected
    # gettext function, which is necessary because the `tokenize` function
    # in the `markey` module marks it as such and passes it to Babel.
    # (These functions are called in the django-babel-underscore module.)
    babel_cmd_template = ('pybabel {verbosity} extract --mapping={config} ' '--add-comments="Translators:" --keyword="interpolate" ' '. --output={output}')
    babel_mako_cfg = self.base(configuration.locale_dir, 'babel_mako.cfg')
    if babel_mako_cfg.exists():
        babel_mako_cmd = babel_cmd_template.format(verbosity=babel_verbosity, config=babel_mako_cfg, output=self.base(configuration.source_messages_dir, 'mako.po'), )
        execute(babel_mako_cmd, working_directory=configuration.root_dir, stderr=stderr)
    babel_underscore_cfg = self.base(configuration.locale_dir, 'babel_underscore.cfg')
    if babel_underscore_cfg.exists():
        babel_underscore_cmd = babel_cmd_template.format(verbosity=babel_verbosity, config=babel_underscore_cfg, output=self.base(configuration.source_messages_dir, 'underscore.po'), )
        execute(babel_underscore_cmd, working_directory=configuration.root_dir, stderr=stderr)
    makemessages = "django-admin.py makemessages -l en -v{}".format(args.verbose)
    ignores = " ".join('--ignore="{}/*"'.format(d) for d in configuration.ignore_dirs)
    if ignores:
        makemessages += " " + ignores
    # Extract strings from django source files (*.py, *.html, *.txt).
    make_django_cmd = makemessages + ' -d django'
    execute(make_django_cmd, working_directory=configuration.root_dir, stderr=stderr)
    # Extract strings from Javascript source files (*.js, *jsx).
    make_djangojs_cmd = makemessages + ' -d djangojs -e js,jsx'
    execute(make_djangojs_cmd, working_directory=configuration.root_dir, stderr=stderr)
    # makemessages creates 'django.po'. This filename is hardcoded.
    # Rename it to django-partial.po to enable merging into django.po later.
    self.rename_source_file('django.po', 'django-partial.po')
    # makemessages creates 'djangojs.po'. This filename is hardcoded.
    # Rename it to djangojs-partial.po to enable merging into djangojs.po later.
    self.rename_source_file('djangojs.po', 'djangojs-partial.po')
    files_to_clean = set()
    # Extract strings from third-party applications.
    for app_name in configuration.third_party:
        # Import the app to find out where it is. Then use pybabel to extract
        # from that directory.
        app_module = importlib.import_module(app_name)
        app_dir = Path(app_module.__file__).dirname().dirname()
        # pylint: disable=no-value-for-parameter
        output_file = self.source_msgs_dir / (app_name + ".po")
        files_to_clean.add(output_file)
        babel_cmd = 'pybabel {verbosity} extract -F {config} -c "Translators:" {app} -o {output}'
        babel_cmd = babel_cmd.format(verbosity=babel_verbosity, config=configuration.locale_dir / 'babel_third_party.cfg', app=app_name, output=output_file, )
        execute(babel_cmd, working_directory=app_dir, stderr=stderr)
    # Segment the generated files.
    segmented_files = segment_pofiles(configuration, configuration.source_locale)
    files_to_clean.update(segmented_files)
    # Finish each file.
    for filename in files_to_clean:
        LOG.info('Cleaning %s', filename)
        pofile = polib.pofile(self.source_msgs_dir.joinpath(filename))
        # replace default headers with edX headers
        fix_header(pofile)
        # replace default metadata with edX metadata
        fix_metadata(pofile)
        # remove key strings which belong in messages.po
        strip_key_strings(pofile)
        pofile.save()
    # Restore the saved .po files.
    self.rename_source_file('django-saved.po', 'django.po')
    self.rename_source_file('djangojs-saved.po', 'djangojs.po')
def batch_get_item(self, batch_list):
    """Return a set of attributes for multiple items in
    multiple tables using their primary keys.

    :type batch_list: :class:`boto.dynamodb.batch.BatchList`
    :param batch_list: A BatchList object which consists of a
        list of :class:`boto.dynamodb.batch.Batch` objects.
        Each Batch object contains the information about one
        batch of objects that you wish to retrieve in this
        request.
    :return: the raw BatchGetItem response, with items decoded
        via ``item_object_hook``
    """
    # convert the BatchList into the wire-format RequestItems structure
    request_items = self.dynamize_request_items(batch_list)
    return self.layer1.batch_get_item(request_items, object_hook=item_object_hook)
def generate_calculus_integrate_sample(vlist, ops, min_depth, max_depth, functions):
    """Randomly generate one symbolic-integration dataset sample.

    Given a randomly generated expression, produce its indefinite integral.

    Args:
        vlist: Variable list. Chars usable in the expression.
        ops: List of ExprOp instances - the allowed operators.
        min_depth: Minimum expression-tree depth (0 = bare variable).
        max_depth: Maximum expression-tree depth; set equal to min_depth
            for fixed-depth trees.
        functions: Dict mapping readable function names ("log", "sin", ...)
            to their single-char tokens.

    Returns:
        sample: Input string of the form 'var:expression'.
        target: String representation of the integral.
    """
    # pick the integration variable; all other symbols act as constants
    var_index = random.randrange(len(vlist))
    var = vlist[var_index]
    consts = vlist[:var_index] + vlist[var_index + 1:]
    # build a random expression guaranteed to contain `var`
    depth = random.randrange(min_depth, max_depth + 1)
    expr_str = str(random_expr_with_required_var(depth, var, consts, ops))
    sample = var + ":" + expr_str
    antiderivative = sympy.integrate(expr_str, sympy.Symbol(var))
    target = format_sympy_expr(antiderivative, functions=functions)
    return sample, target
def list_namespaced_job(self, namespace, **kwargs):  # noqa: E501
    """list_namespaced_job  # noqa: E501

    List or watch objects of kind Job.  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to get the request
    thread instead of the result:

    >>> thread = api.list_namespaced_job(namespace, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str namespace: object name and auth scope, such as for teams
        and projects (required)
    :param bool include_uninitialized: include partially initialized
        resources in the response
    :param str pretty: if 'true', the output is pretty printed
    :param str _continue: continuation token from a previous chunked list
        call (server-defined; see Kubernetes list chunking semantics)
    :param str field_selector: restrict returned objects by their fields
    :param str label_selector: restrict returned objects by their labels
    :param int limit: maximum number of responses per list call; the
        server sets ``continue`` metadata when more items exist
    :param str resource_version: resource version to list/watch from
    :param int timeout_seconds: timeout for the list/watch call
    :param bool watch: stream add/update/remove notifications instead of
        a one-shot list (requires resourceVersion)
    :return: V1JobList, or the request thread when async_req is True
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async_req'):
        return self.list_namespaced_job_with_http_info(namespace, **kwargs)  # noqa: E501
    data = self.list_namespaced_job_with_http_info(namespace, **kwargs)  # noqa: E501
    return data
def expand_short_options(self, argv):
    """Expand grouped short options like ``-abc`` into ``-a, -b, -c``.

    Needed because we set ``allow_abbrev=False`` on the ArgumentParser in
    :prop:`self.arg_parser`. The argparse docs say ``allow_abbrev``
    applies only to long options, but it also affects whether short
    options grouped behind a single dash are parsed into multiple short
    options.
    """
    expanded = []
    for token in argv:
        expanded += self.parse_multi_short_option(token)
    return expanded
def start(self):
    """Start listening to changes on a background thread."""
    self.running = True
    worker = threading.Thread(target=self._main_loop)
    self.thread = worker
    worker.start()
def _process_inputs(self, input_reader, shard_state, tstate, ctx):
    """Read inputs, process them, and write out outputs.

    This is the core logic of MapReduce. It reads inputs from the input
    reader, invokes the user-specified mapper function, and writes output
    with the output writer. It also updates shard_state accordingly,
    e.g. if shard processing is done, sets shard_state.active to False.

    If errors.FailJobError is caught, it will fail this MR job.
    All other exceptions will be logged and raised to taskqueue for retry
    until the number of retries exceeds a limit.

    Args:
        input_reader: input reader.
        shard_state: shard state.
        tstate: transient shard state.
        ctx: mapreduce context.

    Returns:
        Whether this shard has finished processing all its input split
        (None when the processing limit for this slice is zero).
    """
    processing_limit = self._processing_limit(tstate.mapreduce_spec)
    if processing_limit == 0:
        return
    finished_shard = True
    # Input reader may not be an iterator. It is only a container.
    iterator = iter(input_reader)
    while True:
        try:
            # use the builtin next() rather than the Python-2-only
            # iterator.next() method so this also runs on Python 3
            entity = next(iterator)
        except StopIteration:
            break
        # Reading input got exception. If we assume
        # 1. The input reader has done enough retries.
        # 2. The input reader can still serialize correctly after this
        #    exception.
        # 3. The input reader, upon resume, will try to re-read this
        #    failed record.
        # 4. This exception doesn't imply the input reader is permanently
        #    stuck.
        # we can serialize the current slice immediately to avoid
        # duplicated outputs.
        # TODO(user): Validate these assumptions on all readers. MR should
        # also have a way to detect fake forward progress.
        # Record what we are working on for monitoring/debugging.
        if isinstance(entity, db.Model):
            shard_state.last_work_item = repr(entity.key())
        elif isinstance(entity, ndb.Model):
            shard_state.last_work_item = repr(entity.key)
        else:
            shard_state.last_work_item = repr(entity)[:100]
        processing_limit -= 1
        if not self._process_datum(entity, input_reader, ctx, tstate):
            # mapper asked to stop this slice early
            finished_shard = False
            break
        elif processing_limit == 0:
            # slice budget exhausted; more input remains
            finished_shard = False
            break
    # Flush context and its pools.
    self.slice_context.incr(context.COUNTER_MAPPER_WALLTIME_MS, int((self._time() - self._start_time) * 1000))
    return finished_shard
def calcValueAtBirth(cLvlHist, BirthBool, PlvlHist, MrkvHist, DiscFac, CRRA):
    '''Calculate expected value of being born in each Markov state using the
    realizations of consumption for a history of many consumers. The
    histories should already be trimmed of the "burn in" periods.

    Parameters
    ----------
    cLvlHist : np.array
        TxN array of consumption level history for many agents across many
        periods. Agents who die are replaced by newborns.
    BirthBool : np.array
        TxN boolean array indicating when agents are born, replacing one
        who died.
    PlvlHist : np.array
        T length vector of aggregate permanent productivity levels.
    MrkvHist : np.array
        T length vector of integers for the Markov index in each period.
    DiscFac : float
        Intertemporal discount factor.
    CRRA : float
        Coefficient of relative risk aversion.

    Returns
    -------
    vAtBirth : np.array
        J length vector of average lifetime value at birth by Markov state.
    '''
    J = np.max(MrkvHist) + 1
    # Number of Markov states
    T = MrkvHist.size
    # Length of simulation
    I = cLvlHist.shape[1]
    # Number of agent indices in histories
    u = lambda c: CRRAutility(c, gam=CRRA)
    # Initialize an array to hold each agent's lifetime utility:
    # count births per period, then per Markov state, to size the array
    BirthsByPeriod = np.sum(BirthBool, axis=1)
    BirthsByState = np.zeros(J, dtype=int)
    for j in range(J):
        these = MrkvHist == j
        BirthsByState[j] = np.sum(BirthsByPeriod[these])
    N = np.max(BirthsByState)
    # Array must hold this many agents per row at least
    # (unused slots stay NaN and are ignored by nanmean below)
    vArray = np.zeros((J, N)) + np.nan
    n = np.zeros(J, dtype=int)
    # Loop through each agent index
    DiscVec = DiscFac ** np.arange(T)
    for i in range(I):
        birth_t = np.where(BirthBool[:, i])[0]
        # Loop through each agent who lived and died in this index
        for k in range(birth_t.size - 1):  # Last birth event has no death, so ignore
            # Get lifespan of this agent and circumstances at birth
            t0 = birth_t[k]
            t1 = birth_t[k + 1]
            span = t1 - t0
            j = MrkvHist[t0]
            # Calculate discounted flow of utility for this agent and store it
            # (consumption normalized by aggregate productivity at birth)
            cVec = cLvlHist[t0:t1, i] / PlvlHist[t0]
            uVec = u(cVec)
            v = np.dot(DiscVec[:span], uVec)
            vArray[j, n[j]] = v
            n[j] += 1
    # Calculate expected value at birth by state and return it
    vAtBirth = np.nanmean(vArray, axis=1)
    return vAtBirth
def memory_enumerator(buffer_, *args, **kwargs):
    """Return an enumerator that knows how to read raw memory."""
    _LOGGER.debug("Enumerating through (%d) bytes of archive data.", len(buffer_))

    def opener(archive_res):
        # called lazily by the enumerator to attach the in-memory archive
        _LOGGER.debug("Opening from (%d) bytes (memory_enumerator).", len(buffer_))
        _archive_read_open_memory(archive_res, buffer_)

    # default entry class, unless the caller supplied one
    kwargs.setdefault('entry_cls', _ArchiveEntryItReadable)
    return _enumerator(opener, *args, **kwargs)
def sample(self, observations_by_state):
    """Sample new output distribution parameters from observed symbol counts.

    Updates ``self._output_probabilities`` in place: for every hidden state,
    the observed symbol counts plus the Dirichlet prior parameterize a
    Dirichlet draw over the symbols that actually occurred.

    Parameters
    ----------
    observations_by_state : [numpy.array with shape (N_k,)] with nstates elements
        observations_by_state[k] are all observations associated with hidden
        state k.

    Examples
    --------
    initialize output model

    >>> B = np.array([[0.5, 0.5], [0.1, 0.9]])
    >>> output_model = DiscreteOutputModel(B)

    sample given observation

    >>> obs = [[0, 0, 0, 1, 1, 1], [1, 1, 1, 1, 1, 1]]
    >>> output_model.sample(obs)
    """
    from numpy.random import dirichlet
    _, num_symbols = self._output_probabilities.shape
    for state, state_obs in enumerate(observations_by_state):
        # Histogram of symbols seen in this hidden state, plus the prior.
        pseudo_counts = np.bincount(state_obs, minlength=num_symbols).astype(float)
        pseudo_counts += self.prior[state]
        seen = pseudo_counts > 0
        # Only symbols with positive pseudo-counts are resampled; the other
        # entries keep their previous probabilities.
        self._output_probabilities[state, seen] = dirichlet(pseudo_counts[seen])
def json_based_stable_hash(obj):
    """Compute a cross-kernel stable hash value for the given object.

    The supported data structures are the built-in list, tuple and dict
    types.  Any included tuple or list, whether outer or nested, may only
    contain values of the following built-in types: bool, int, float,
    complex, str, list, tuple and dict.

    Any included dict, whether outer or nested, may only contain keys of a
    single type, which can be one of the following built-in types: bool,
    int, float, str, and may only contain values of only the following
    built-in types: bool, int, float, complex, str, list, tuple, dict.

    Parameters
    ----------
    obj : bool/int/float/complex/str/dict/list/tuple
        The object for which to compute a hash value.

    Returns
    -------
    str
        The computed hash: the SHA-256 digest of the canonical JSON
        encoding, as a 64-character hexadecimal string.  (Previously
        documented as ``int``, but the function returns ``hexdigest()``,
        which is a string.)
    """
    # Canonical encoding: sorted keys and fixed separators make the JSON
    # text -- and therefore the hash -- independent of dict insertion order
    # and of the running interpreter ("cross-kernel").
    encoded_str = json.dumps(
        obj=obj,
        skipkeys=False,
        ensure_ascii=False,
        check_circular=True,
        allow_nan=True,
        cls=None,
        indent=0,
        separators=(',', ':'),
        default=None,
        sort_keys=True,
    ).encode('utf-8')
    return hashlib.sha256(encoded_str).hexdigest()
def install(args):
    """Install site from sources or module.

    :param args: parsed CLI namespace; reads ``module``, ``src``,
        ``PROJECT``, ``path``, ``info``, ``repeat`` and ``update``, and
        writes ``src``, ``home`` and ``deploy_dir`` back onto it.
    :return: the installed site object, or ``True`` when no source was
        cloned.
    :raises AssertionError: on a missing module or an existing target dir.
    """
    # Only warn about an active virtualenv -- the install proceeds anyway.
    if 'VIRTUAL_ENV' in environ:
        LOGGER.warning('Virtualenv enabled: %s' % environ['VIRTUAL_ENV'])
    # Install from base modules
    if args.module:
        args.src = op.join(settings.MOD_DIR, args.module)
        assert op.exists(args.src), "Not found module: %s" % args.module
    # Fix project name
    args.PROJECT = args.PROJECT.replace('-', '_')
    args.home = op.abspath(args.path)
    # Create engine
    engine = Installer(args)
    args.deploy_dir = engine.target_dir
    # Refuse to deploy onto an existing directory unless the caller asked
    # for info, a repeat run, or an update.
    assert args.info or args.repeat or args.update or not op.exists(engine.target_dir), "Path %s exists. Stop deploy." % args.deploy_dir
    try:
        if args.repeat:
            # Re-run the install scripts of an already deployed site.
            site = Site(engine.target_dir)
            site.run_install()
            return site
        site = engine.clone_source()
        if not site:
            # Nothing cloned (e.g. info-only run) -- report success.
            return True
        engine.build(args.update)
        site.run_install()
        return site
    except (CalledProcessError, AssertionError):
        LOGGER.error("Installation failed")
        LOGGER.error("Fix errors and repeat installation with (-r) or run 'makesite uninstall %s' for cancel." % args.deploy_dir)
        raise
def get_category(category_string, model=Category):
    """Convert a string (optionally a '/'-separated path) into a Category.

    :param category_string: e.g. ``"parent/child"`` or ``"child"``;
        surrounding quotes and slashes are stripped first.
    :param model: the category model class (resolved via ``get_cat_model``).
    :return: the matching category instance, or ``None`` when the path is
        empty, ambiguous, or matches nothing.
    """
    model_class = get_cat_model(model)
    category = str(category_string).strip("'\"").strip('/')
    if not category:
        # Nothing left after stripping -> no category to look up.  (The
        # previous ``len(cat_list) == 0`` check was dead code: str.split
        # never returns an empty list.)
        return None
    cat_list = category.split('/')
    try:
        categories = model_class.objects.filter(name=cat_list[-1], level=len(cat_list) - 1)
        if len(cat_list) == 1 and len(categories) > 1:
            # A bare name matching several categories is ambiguous.
            return None
        # If there is only one, use it. If there is more than one, check
        # if the parent matches the parent passed in the string.
        if len(categories) == 1:
            return categories[0]
        for item in categories:
            # Guard against root categories whose parent is None, which
            # previously raised AttributeError.
            if item.parent is not None and item.parent.name == cat_list[-2]:
                return item
        # No candidate matched the requested parent.
        return None
    except model_class.DoesNotExist:
        return None
def create_matrix_block_indices(row_to_obs):
    """Map design-matrix rows to the observation each row belongs to.

    Parameters
    ----------
    row_to_obs : 2D sparse matrix or ndarray.
        There should be one row per observation per available alternative
        and one column per observation.  This matrix maps the rows of the
        design matrix to the unique observations (on the columns).

    Returns
    -------
    output_indices : list of 1D ndarrays.
        One array per column in ``row_to_obs``; each array notes which rows
        correspond to that observation (i.e. the rows with ones in them).
    """
    # Initialize the list of index arrays to be returned.
    output_indices = []
    # Determine the number of observations in the dataset.
    num_obs = row_to_obs.shape[1]
    # Get the indices of the non-zero elements and their values.
    row_indices, col_indices, values = scipy.sparse.find(row_to_obs)
    # ``range`` replaces the Python-2-only ``xrange`` (a NameError on
    # Python 3, which this file otherwise targets); results are identical.
    for col in range(num_obs):
        # Store the array of row indices belonging to the current observation.
        output_indices.append(row_indices[np.where(col_indices == col)])
    return output_indices
def _sound_settings(area, setting, value, validate_value):
    """Validate a sound setting/value pair and build its request packet.

    When ``validate_value`` is falsy the packet is returned without checks.
    Raises ``AbodeException`` when the value is not valid for the setting.
    """
    if not validate_value:
        return {'area': area, setting: value}
    # Each check raises, so independent ifs are equivalent to the chain.
    if setting in CONST.VALID_SOUND_SETTINGS and value not in CONST.ALL_SETTING_SOUND:
        raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.ALL_SETTING_SOUND)
    if setting == CONST.SETTING_ALARM_LENGTH and value not in CONST.ALL_SETTING_ALARM_LENGTH:
        raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.ALL_SETTING_ALARM_LENGTH)
    if setting == CONST.SETTING_FINAL_BEEPS and value not in CONST.ALL_SETTING_FINAL_BEEPS:
        raise AbodeException(ERROR.INVALID_SETTING_VALUE, CONST.ALL_SETTING_FINAL_BEEPS)
    return {'area': area, setting: value}
def create(gandi, datacenter, memory, cores, ip_version, bandwidth, login, password, hostname, image, run, background, sshkey, size, vlan, ip, script, script_args, ssh, gen_password):
    """Create a new virtual machine.

    you can specify a configuration entry named 'sshkey' containing
    path to your sshkey file

    $ gandi config set [-g] sshkey ~/.ssh/id_rsa.pub

    or getting the sshkey "my_key" from your gandi ssh keyring

    $ gandi config set [-g] sshkey my_key

    to know which disk image label (or id) to use as image

    $ gandi vm images
    """
    # Warn (but continue) when the chosen datacenter is scheduled to close.
    try:
        gandi.datacenter.is_opened(datacenter, 'iaas')
    except DatacenterLimited as exc:
        if exc.date:
            gandi.echo('/!\ Datacenter %s will be closed on %s, ' 'please consider using another datacenter.' % (datacenter, exc.date))
    if gandi.image.is_deprecated(image, datacenter):
        gandi.echo('/!\ Image %s is deprecated and will soon be unavailable.' % image)
    pwd = None
    if gen_password:
        pwd = mkpassword()
    # Prompt interactively when a password was requested explicitly, or when
    # neither a generated password nor an SSH key authenticates the VM.
    if password or (not pwd and not sshkey):
        pwd = click.prompt('password', hide_input=True, confirmation_prompt=True)
    if ip and not vlan:
        gandi.echo("--ip can't be used without --vlan.")
        return
    # Default to a public IPv6 address unless the VM is vlan-only.
    if not vlan and not ip_version:
        ip_version = 6
    if not ip_version:
        gandi.echo("* Private only ip vm (can't enable emergency web console " 'access).')
    # Display a short summary for creation
    if login:
        user_summary = 'root and %s users' % login
        password_summary = 'Users root and %s' % login
    else:
        user_summary = 'root user'
        password_summary = 'User root'
    gandi.echo('* %s will be created.' % user_summary)
    if sshkey:
        gandi.echo('* SSH key authorization will be used.')
    if pwd and gen_password:
        gandi.echo('* %s setup with password %s' % (password_summary, pwd))
    if not pwd:
        gandi.echo('* No password supplied for vm (required to enable ' 'emergency web console access).')
    result = gandi.iaas.create(datacenter, memory, cores, ip_version, bandwidth, login, pwd, hostname, image, run, background, sshkey, size, vlan, ip, script, script_args, ssh)
    if background:
        gandi.echo('* IAAS backend is now creating your VM and its ' 'associated resources in the background.')
    return result
def create(cls, model, parent=None, uifile='', commit=True):
    """Prompts the user to create a new record for the inputed table.

    :param model   | <subclass of orb.Table>
           parent  | <QWidget>
           uifile  | <str> optional .ui file for the editing widget
           commit  | <bool> commit the record on accept

    :return <orb.Table> || None | instance of the inputed table class
    """
    # create the dialog
    dlg = QDialog(parent)
    dlg.setWindowTitle('Create %s' % model.schema().name())
    # create the widget -- the schema may override the widget class via its
    # 'widgetClass' property; fall back to this class otherwise
    cls = model.schema().property('widgetClass', cls)
    widget = cls(dlg)
    if (uifile):
        widget.setUiFile(uifile)
    widget.setModel(model)
    widget.layout().setContentsMargins(0, 0, 0, 0)
    # create buttons
    opts = QDialogButtonBox.Save | QDialogButtonBox.Cancel
    btns = QDialogButtonBox(opts, Qt.Horizontal, dlg)
    # create layout
    layout = QVBoxLayout()
    layout.addWidget(widget)
    layout.addWidget(btns)
    dlg.setLayout(layout)
    dlg.adjustSize()
    # create connections -- Save triggers the widget's save, and a
    # successful save accepts (closes) the dialog
    btns.accepted.connect(widget.save)
    btns.rejected.connect(dlg.reject)
    widget.saved.connect(dlg.accept)
    if (dlg.exec_()):
        record = widget.record()
        if (commit):
            record.commit()
        return record
    return None
def make_imshow_plot(grid, name):
    """Render a grid of RGB or RGBA values as an image saved to *name*.

    Colors every grid cell and writes the figure to the given filename.
    """
    # Hide all tick marks and labels so only the colored cells are visible.
    hidden = "off"
    plt.tick_params(labelbottom=hidden, labeltop=hidden, labelleft=hidden,
                    labelright=hidden, bottom=hidden, top=hidden,
                    left=hidden, right=hidden)
    plt.imshow(grid, interpolation="nearest", aspect=1, zorder=1)
    plt.tight_layout()
    plt.savefig(name, dpi=1000, bbox_inches="tight")
def logspace_bins(self, bins=None, units=None, conversion_function=convert_time, resolution=None):
    """Generate edges for a logspace tiling: one more edge than bins, with
    each bin lying between two consecutive edges."""
    centers = self.logspace(bins=bins, units=units,
                            conversion_function=conversion_function,
                            resolution=resolution, end_at_end=False)
    # Mean ratio of consecutive centers (constant for a true logspace).
    ratio = np.mean(centers[:-1] / centers[1:])
    half_step = np.sqrt(ratio)
    # Shift every center by half a log-step and append the final upper edge.
    return np.concatenate([centers * half_step, centers[-1:] / half_step])
def get_product(id=None, name=None):
    """Get a specific Product by name or ID.

    Returns the formatted JSON payload, or ``None`` when nothing is found.
    """
    content = get_product_raw(id, name)
    return utils.format_json(content) if content else None
def postprocess_keyevent(self, event):
    """Post-process keypress event:
    in InternalShell, this is method is called when shell is ready.

    Dispatches on the pressed key: editing keys are restricted to the
    region after the prompt, Up/Down browse command history when the
    cursor is on the prompt line, and standard shortcuts (cut/copy/paste,
    undo/redo, clear) are forwarded to the console widget.
    """
    event, text, key, ctrl, shift = restore_keyevent(event)
    # Is cursor on the last line? and after prompt?
    if len(text):
        # XXX: Shouldn't it be: `if len(unicode(text).strip(os.linesep))` ?
        if self.has_selected_text():
            self.check_selection()
        # Printable input is only allowed after the current prompt.
        self.restrict_cursor_position(self.current_prompt_pos, 'eof')
    cursor_position = self.get_position('cursor')
    if key in (Qt.Key_Return, Qt.Key_Enter):
        if self.is_cursor_on_last_line():
            self._key_enter()
        # add and run selection
        else:
            self.insert_text(self.get_selected_text(), at_end=True)
    elif key == Qt.Key_Insert and not shift and not ctrl:
        # Toggle overwrite/insert mode.
        self.setOverwriteMode(not self.overwriteMode())
    elif key == Qt.Key_Delete:
        if self.has_selected_text():
            self.check_selection()
            self.remove_selected_text()
        elif self.is_cursor_on_last_line():
            self.stdkey_clear()
    elif key == Qt.Key_Backspace:
        self._key_backspace(cursor_position)
    elif key == Qt.Key_Tab:
        self._key_tab()
    elif key == Qt.Key_Space and ctrl:
        self._key_ctrl_space()
    elif key == Qt.Key_Left:
        if self.current_prompt_pos == cursor_position:
            # Avoid moving cursor on prompt
            return
        method = self.extend_selection_to_next if shift else self.move_cursor_to_next
        method('word' if ctrl else 'character', direction='left')
    elif key == Qt.Key_Right:
        if self.is_cursor_at_end():
            return
        method = self.extend_selection_to_next if shift else self.move_cursor_to_next
        method('word' if ctrl else 'character', direction='right')
    elif (key == Qt.Key_Home) or ((key == Qt.Key_Up) and ctrl):
        self._key_home(shift, ctrl)
    elif (key == Qt.Key_End) or ((key == Qt.Key_Down) and ctrl):
        self._key_end(shift, ctrl)
    elif key == Qt.Key_Up:
        if not self.is_cursor_on_last_line():
            self.set_cursor_position('eof')
        y_cursor = self.get_coordinates(cursor_position)[1]
        y_prompt = self.get_coordinates(self.current_prompt_pos)[1]
        # Within a multi-line entry move up; on the prompt line browse history.
        if y_cursor > y_prompt:
            self.stdkey_up(shift)
        else:
            self.browse_history(backward=True)
    elif key == Qt.Key_Down:
        if not self.is_cursor_on_last_line():
            self.set_cursor_position('eof')
        y_cursor = self.get_coordinates(cursor_position)[1]
        y_end = self.get_coordinates('eol')[1]
        # Within a multi-line entry move down; otherwise browse history.
        if y_cursor < y_end:
            self.stdkey_down(shift)
        else:
            self.browse_history(backward=False)
    elif key in (Qt.Key_PageUp, Qt.Key_PageDown):
        # XXX: Find a way to do this programmatically instead of calling
        # widget keyhandler (this won't work if the *event* is coming from
        # the event queue - i.e. if the busy buffer is ever implemented)
        ConsoleBaseWidget.keyPressEvent(self, event)
    elif key == Qt.Key_Escape and shift:
        self.clear_line()
    elif key == Qt.Key_Escape:
        self._key_escape()
    elif key == Qt.Key_L and ctrl:
        self.clear_terminal()
    elif key == Qt.Key_V and ctrl:
        self.paste()
    elif key == Qt.Key_X and ctrl:
        self.cut()
    elif key == Qt.Key_Z and ctrl:
        self.undo()
    elif key == Qt.Key_Y and ctrl:
        self.redo()
    elif key == Qt.Key_A and ctrl:
        self.selectAll()
    elif key == Qt.Key_Question and not self.has_selected_text():
        self._key_question(text)
    elif key == Qt.Key_ParenLeft and not self.has_selected_text():
        self._key_parenleft(text)
    elif key == Qt.Key_Period and not self.has_selected_text():
        self._key_period(text)
    elif len(text) and not self.isReadOnly():
        # Plain printable character: insert it and reset history tracking.
        self.hist_wholeline = False
        self.insert_text(text)
        self._key_other(text)
    else:
        # Let the parent widget handle the key press event
        ConsoleBaseWidget.keyPressEvent(self, event)
def _show_details(self):
    """Toggle the traceback pane of the error dialog."""
    if self.details.isVisible():
        # Currently shown -> collapse it.
        self.details.hide()
        self.details_btn.setText(_('Show details'))
        return
    # Currently hidden -> grow the dialog and (re)render the traceback.
    self.resize(570, 700)
    self.details.document().setPlainText('')
    self.details.append_text_to_shell(self.error_traceback, error=True, prompt=False)
    self.details.show()
    self.details_btn.setText(_('Hide details'))
async def ack(self, msg):
    """Manually acknowledge a message.

    :param msg: Message which is pending to be acked by client.
    """
    proto_ack = protocol.Ack()
    proto_ack.subject = msg.proto.subject
    proto_ack.sequence = msg.proto.sequence
    payload = proto_ack.SerializeToString()
    # The ack is published on the subscription's dedicated ack inbox.
    await self._nc.publish(msg.sub.ack_inbox, payload)
def get_objective(self):
    """Gets the related objective.

    return: (osid.learning.Objective) - the related objective
    raise:  OperationFailed - unable to complete request
    compliance: mandatory - This method must be implemented.
    """
    # Note that this makes the generic objectives call to Handcar
    # without specifying the objectiveBank:
    url_str = '{0}/objectives/{1}'.format(self._base_url, self._my_map['objectiveId'])
    return Objective(self._load_json(url_str))
def up_to(self, term: str) -> str:
    """Parse and return segment terminated by the first occurrence of a string.

    Args:
        term: Terminating string.

    Returns:
        The text between the current offset and the terminator (exclusive).

    Raises:
        EndOfInput: If `term` does not occur in the rest of the input text.
    """
    end = self.input.find(term, self.offset)
    if end < 0:
        raise EndOfInput(self)
    res = self.input[self.offset:end]
    # NOTE(review): this advances past exactly ONE character of the
    # terminator; for a multi-character ``term`` the remaining characters
    # stay in the input.  Confirm this is intended (vs. ``end + len(term)``).
    self.offset = end + 1
    return res
def pad(args):
    """%prog pad blastfile cdtfile --qbed q.pad.bed --sbed s.pad.bed

    Test and reconstruct candidate PADs.
    """
    from jcvi.formats.cdt import CDT
    p = OptionParser(pad.__doc__)
    p.set_beds()
    p.add_option("--cutoff", default=.3, type="float", help="The clustering cutoff to call similar [default: %default]")
    opts, args = p.parse_args(args)
    if len(args) != 2:
        sys.exit(not p.print_help())
    cutoff = opts.cutoff
    blastfile, cdtfile = args
    qbed, sbed, qorder, sorder, is_self = check_beds(blastfile, p, opts)
    # Partition query and subject trees at the clustering cutoff.
    cdt = CDT(cdtfile)
    qparts = list(cdt.iter_partitions(cutoff=cutoff))
    sparts = list(cdt.iter_partitions(cutoff=cutoff, gtr=False))
    # Map each original seqid to the index of its partition.
    qid, sid = {}, {}
    for i, part in enumerate(qparts):
        qid.update(dict((x, i) for x in part))
    for i, part in enumerate(sparts):
        sid.update(dict((x, i) for x in part))
    # Without writing files, conversion from PAD to merged PAD is done in memory
    for q in qbed:
        q.seqid = qid[q.seqid]
    for s in sbed:
        s.seqid = sid[s.seqid]
    qnames = range(len(qparts))
    snames = range(len(sparts))
    logmp = make_arrays(blastfile, qbed, sbed, qnames, snames)
    m, n = logmp.shape
    # ``cutoff`` is reused here as -log(P) threshold on the score matrix.
    pvalue_cutoff = 1e-30
    cutoff = -log(pvalue_cutoff)
    significant = []
    # NOTE(review): ``xrange`` is Python-2-only; this function predates the
    # file's Python-3 code and would need ``range`` there.
    for i in xrange(m):
        for j in xrange(n):
            score = logmp[i, j]
            if score < cutoff:
                continue
            significant.append((qparts[i], sparts[j], score))
    for a, b, score in significant:
        print("|".join(a), "|".join(b), score)
    logging.debug("Collected {0} PAR comparisons significant at (P < {1}).".format(len(significant), pvalue_cutoff))
    return significant
def process_multientry(entry_list, prod_comp, coeff_threshold=1e-4):
    """Static method for finding a multientry based on
    a list of entries and a product composition.
    Essentially checks to see if a valid aqueous
    reaction exists between the entries and the
    product composition and returns a MultiEntry
    with weights according to the coefficients if so.

    Args:
        entry_list ([Entry]): list of entries from which to
            create a MultiEntry
        prod_comp (Composition): composition constraint for setting
            weights of MultiEntry
        coeff_threshold (float): threshold of stoichiometric
            coefficients to filter, if weights are lower than
            this value, the entry is not returned
    """
    # H and O are added as free species so the reaction can balance water.
    dummy_oh = [Composition("H"), Composition("O")]
    try:
        # Get balanced reaction coeffs, ensuring all < 0 or conc thresh
        # Note that we get reduced compositions for solids and non-reduced
        # compositions for ions because ions aren't normalized due to
        # their charge state.
        entry_comps = [e.composition for e in entry_list]
        rxn = Reaction(entry_comps + dummy_oh, [prod_comp])
        # Negate so that reactant-side coefficients become positive weights.
        coeffs = -np.array([rxn.get_coeff(comp) for comp in entry_comps])
        # Return None if reaction coeff threshold is not met
        # TODO: this filtration step might be put somewhere else
        if (coeffs > coeff_threshold).all():
            return MultiEntry(entry_list, weights=coeffs.tolist())
        else:
            return None
    except ReactionError:
        # No valid reaction could be balanced between entries and product.
        return None
def encode(self, pad=106):
    """Encodes this AIT command to binary.

    If pad is specified, it indicates the maximum size of the encoded
    command in bytes.  If the encoded command is less than pad, the
    remaining bytes are set to zero.

    Commands sent to ISS payloads over 1553 are limited to 64 words
    (128 bytes) with 11 words (22 bytes) of CCSDS overhead (SSP
    52050J, Section 3.2.3.4).  This leaves 53 words (106 bytes) for
    the command itself.
    """
    opcode_bytes = struct.pack('>H', self.defn.opcode)
    pos = len(opcode_bytes)
    total = max(pos + self.defn.argsize, pad)
    buf = bytearray(total)
    buf[0:pos] = opcode_bytes
    # A single byte holding the total argument size precedes the arguments.
    buf[pos] = self.defn.argsize
    pos += 1
    arg_idx = 0
    for arg_defn in self.defn.argdefns:
        # Fixed arguments carry their own value; the rest consume the next
        # user-supplied argument in order.
        if arg_defn.fixed:
            arg_value = arg_defn.value
        else:
            arg_value = self.args[arg_idx]
            arg_idx += 1
        buf[arg_defn.slice(pos)] = arg_defn.encode(arg_value)
    return buf
def _get_kind(cls):
    """Override.

    Make sure that the kind returned is the root class of the
    polymorphic hierarchy.
    """
    bases = cls._get_hierarchy()
    if not bases:
        # We have to jump through some hoops to call the superclass'
        # _get_kind() method.  First, this is called by the metaclass
        # before the PolyModel name is defined, so it can't use
        # super(PolyModel, cls)._get_kind().  Second, we can't just call
        # Model._get_kind() because that always returns 'Model'.  Hence
        # the 'im_func' hack.
        # NOTE(review): 'im_func' exists only on Python 2 unbound methods;
        # confirm the supported interpreter before porting this block.
        return model.Model._get_kind.im_func(cls)
    else:
        # The root of the hierarchy names the datastore kind.
        return bases[0]._class_name()
import itertools
def deduplicate(input_list):
    """Remove duplicates from a list of lists or a list of elements.

    The result is sorted.  Unlike the previous implementation, the input
    list itself is left unmodified (it used to be sorted in place).

    Examples:
        deduplicate([[10, 20], [40], [30, 56, 25], [10, 20], [33], [40]])
        [[10, 20], [30, 56, 25], [33], [40]]
        deduplicate(['a', 'b', 'a', 'c', 'c'])
        ['a', 'b', 'c']
        deduplicate([1, 3, 5, 6, 3, 5, 6, 1])
        [1, 3, 5, 6]

    Args:
        input_list: List of lists or list of elements to deduplicate.

    Returns:
        Sorted list of lists or list of unique elements.
    """
    # sorted() (rather than list.sort()) avoids mutating the caller's list.
    # After sorting, groupby collapses runs of equal items -- i.e. all
    # duplicates -- and works for unhashable items such as inner lists.
    ordered = sorted(input_list)
    return [item for item, _ in itertools.groupby(ordered)]
def listProcessingEras(self, processing_version=''):
    """Returns all processing eras in dbs, optionally filtered by
    ``processing_version``."""
    conn = self.dbi.connection()
    try:
        # The DAO executes the query against the borrowed connection.
        return self.pelst.execute(conn, processing_version)
    finally:
        # Always hand the connection back, even on query failure.
        if conn:
            conn.close()
def load_image(self, file_path, redraw=True):
    """Accepts a path to an 8x8 image file and updates the LED matrix with
    the image.

    Returns the pixel data as a list of ``[R, G, B]`` lists.
    """
    if not os.path.exists(file_path):
        raise IOError('%s not found' % file_path)
    rgb_image = Image.open(file_path).convert('RGB')
    # Flatten the image into one [R, G, B] list per pixel.
    pixel_list = [list(pixel) for pixel in rgb_image.getdata()]
    if redraw:
        self.set_pixels(pixel_list)
    return pixel_list
def noun(self, plural: bool = False) -> str:
    """Return a random noun in German.

    :param plural: Return noun in plural.
    :return: Noun.
    """
    pool = self._data['plural' if plural else 'noun']
    return self.random.choice(pool)
def to_pn(self, sub_letter=None):
    """Returns the part number equivalent.  For instance, a '1k' would
    still be '1k', but a '1.2k' would, instead, be a '1k2'.

    When there is no scaling suffix, ``sub_letter`` (if given) replaces
    the decimal point instead.

    :return: the part-number style string
    """
    text = str(self)
    if '.' not in text:
        return text
    suffix = text[-1]
    # take care of the case of when there is no scaling unit
    if not suffix.isalpha():
        return text.replace('.', sub_letter) if sub_letter is not None else text
    # Move the scaling letter into the decimal point's position.
    return text.replace('.', suffix)[:-1]
def get_access_token(self, verifier=None):
    """Return the access token for this API.  If we've not fetched it yet,
    go out, request and memoize it."""
    if self._access_token is None:
        # First use: fetch and cache both the token and its raw dict form.
        token, token_dict = self._get_access_token(verifier)
        self._access_token = token
        self._access_token_dict = token_dict
    return self._access_token
def transformer_prepare_encoder(inputs, target_space, hparams, features=None):
    """Prepare one shard of the model for the encoder.

    Args:
        inputs: a Tensor.
        target_space: a Tensor.
        hparams: run hyperparameters
        features: optionally pass the entire features dictionary as well.
            This is needed now for "packed" datasets.

    Returns:
        encoder_input: a Tensor, bottom of encoder stack
        encoder_self_attention_bias: a bias tensor for use in encoder self-attention
        encoder_decoder_attention_bias: a bias tensor for use in encoder-decoder
            attention
    """
    ishape_static = inputs.shape.as_list()
    encoder_input = inputs
    if features and "inputs_segmentation" in features:
        # Packed dataset.  Keep the examples from seeing each other.
        inputs_segmentation = features["inputs_segmentation"]
        inputs_position = features["inputs_position"]
        targets_segmentation = features["targets_segmentation"]
        if (hasattr(hparams, "unidirectional_encoder") and hparams.unidirectional_encoder):
            tf.logging.info("Using unidirectional encoder")
            # Causal mask: each position attends only to earlier positions.
            encoder_self_attention_bias = (common_attention.attention_bias_lower_triangle(common_layers.shape_list(inputs)[1]))
        else:
            # Restrict attention to positions within the same packed segment.
            encoder_self_attention_bias = (common_attention.attention_bias_same_segment(inputs_segmentation, inputs_segmentation))
        encoder_decoder_attention_bias = (common_attention.attention_bias_same_segment(targets_segmentation, inputs_segmentation))
    else:
        # Mask out padding positions so attention ignores them.
        encoder_padding = common_attention.embedding_to_padding(encoder_input)
        ignore_padding = common_attention.attention_bias_ignore_padding(encoder_padding)
        if (hasattr(hparams, "unidirectional_encoder") and hparams.unidirectional_encoder):
            tf.logging.info("Using unidirectional encoder")
            encoder_self_attention_bias = (common_attention.attention_bias_lower_triangle(common_layers.shape_list(inputs)[1]))
        else:
            # Usual case - not a packed dataset.
            encoder_self_attention_bias = ignore_padding
        encoder_decoder_attention_bias = ignore_padding
        inputs_position = None
    if hparams.proximity_bias:
        # Bias attention toward nearby positions.
        encoder_self_attention_bias += common_attention.attention_bias_proximal(common_layers.shape_list(inputs)[1])
    if target_space is not None and hparams.get("use_target_space_embedding", True):
        # Append target_space_id embedding to inputs.
        emb_target_space = common_layers.embedding(target_space, 32, ishape_static[-1], name="target_space_embedding", dtype=hparams.get("activation_dtype", "float32"))
        emb_target_space = tf.reshape(emb_target_space, [1, 1, -1])
        encoder_input += emb_target_space
    if hparams.pos == "timing":
        # Sinusoidal positional signal; packed datasets carry explicit positions.
        if inputs_position is not None:
            encoder_input = common_attention.add_timing_signal_1d_given_position(encoder_input, inputs_position)
        else:
            encoder_input = common_attention.add_timing_signal_1d(encoder_input)
    elif hparams.pos == "emb":
        # Learned positional embeddings instead of the sinusoidal signal.
        encoder_input = common_attention.add_positional_embedding(encoder_input, hparams.max_length, "inputs_positional_embedding", inputs_position)
    # Match the bias dtype to the (possibly lower-precision) activations.
    encoder_self_attention_bias = common_layers.cast_like(encoder_self_attention_bias, encoder_input)
    encoder_decoder_attention_bias = common_layers.cast_like(encoder_decoder_attention_bias, encoder_input)
    return (encoder_input, encoder_self_attention_bias, encoder_decoder_attention_bias)
def white_noise(space, mean=0, stddev=1, seed=None):
    """Standard gaussian noise in space, pointwise ``N(mean, stddev**2)``.

    Parameters
    ----------
    space : `TensorSpace` or `ProductSpace`
        The space in which the noise is created.
    mean : ``space.field`` element or ``space`` `element-like`, optional
        The mean of the white noise. If a scalar, it is interpreted as
        ``mean * space.one()``.
        If ``space`` is complex, the real and imaginary parts are interpreted
        as the mean of their respective part of the noise.
    stddev : `float` or ``space`` `element-like`, optional
        The standard deviation of the white noise. If a scalar, it is
        interpreted as ``stddev * space.one()``.
    seed : int, optional
        Random seed to use for generating the noise.
        For ``None``, use the current seed.

    Returns
    -------
    white_noise : ``space`` element

    See Also
    --------
    poisson_noise
    salt_pepper_noise
    numpy.random.normal
    """
    from odl.space import ProductSpace
    # Scope the global NumPy RNG state so a given seed is reproducible.
    with NumpyRandomSeed(seed):
        if isinstance(space, ProductSpace):
            # Recurse into the components; each subspace gets its own draw.
            values = [white_noise(subspace, mean, stddev) for subspace in space]
        else:
            if space.is_complex:
                # Draw real and imaginary parts independently.  A plain
                # int/float mean works too since numbers expose .real/.imag.
                real = np.random.normal(loc=mean.real, scale=stddev, size=space.shape)
                imag = np.random.normal(loc=mean.imag, scale=stddev, size=space.shape)
                values = real + 1j * imag
            else:
                values = np.random.normal(loc=mean, scale=stddev, size=space.shape)
    return space.element(values)
def pip_list(self, name=None, prefix=None, abspath=True):
    """Get list of pip installed packages.

    Exactly one of ``name`` (environment name) or ``prefix`` (environment
    path) must be given; the listing runs asynchronously in a worker.
    """
    # Both-or-neither is an error: exactly one selector is required.
    if bool(name) == bool(prefix):
        raise TypeError("conda pip: exactly one of 'name' or 'prefix' required.")
    if name:
        prefix = self.get_prefix_envname(name)
    python_bin = os.sep.join([prefix, 'bin', 'python'])
    worker = ProcessWorker([python_bin, PIP_LIST_SCRIPT], pip=True, parse=True,
                           callback=self._pip_list,
                           extra_kwargs={'prefix': prefix})
    worker.sig_finished.connect(self._start)
    self._queue.append(worker)
    self._start()
    return worker
def cbow_batch(centers, contexts, num_tokens, dtype, index_dtype):
    """Create a batch for CBOW training objective.

    ``contexts`` is a (data, row, col) triple describing a sparse
    center-by-token context matrix.
    """
    data, rows, cols = contexts
    center_nd = mx.nd.array(centers, dtype=index_dtype)
    # One CSR row per center word, one column per vocabulary token.
    context_csr = mx.nd.sparse.csr_matrix(
        (data, (rows, cols)), dtype=dtype, shape=(len(center_nd), num_tokens))
    # yapf: disable
    return center_nd, context_csr
def _clean_str ( self , s ) :
"""Returns a lowercase string with punctuation and bad chars removed
: param s : string to clean""" | return s . translate ( str . maketrans ( '' , '' , punctuation ) ) . replace ( '\u200b' , " " ) . strip ( ) . lower ( ) |
def _check_reliability(self, old_value=None, new_value=None):
    """This function is called when the object is created and after
    one of its configuration properties has changed.  The new and old value
    parameters are ignored, this is called after the property has been
    changed and this is only concerned with the current value.

    Sets ``self.reliability`` to 'noFaultDetected' when the schedule
    configuration is consistent, or to 'configurationError' otherwise.
    """
    if _debug:
        LocalScheduleObject._debug("_check_reliability %r %r", old_value, new_value)
    try:
        # The default value fixes the datatype every schedule entry must use.
        schedule_default = self.scheduleDefault
        if schedule_default is None:
            raise ValueError("scheduleDefault expected")
        if not isinstance(schedule_default, Atomic):
            raise TypeError("scheduleDefault must be an instance of an atomic type")
        schedule_datatype = schedule_default.__class__
        if _debug:
            LocalScheduleObject._debug(" - schedule_datatype: %r", schedule_datatype)
        # At least one of the two schedules must be configured.
        if (self.weeklySchedule is None) and (self.exceptionSchedule is None):
            raise ValueError("schedule required")
        # check the weekly schedule values
        if self.weeklySchedule:
            for daily_schedule in self.weeklySchedule:
                for time_value in daily_schedule.daySchedule:
                    if _debug:
                        LocalScheduleObject._debug(" - daily time_value: %r", time_value)
                    if time_value is None:
                        pass
                    elif not isinstance(time_value.value, (Null, schedule_datatype)):
                        if _debug:
                            LocalScheduleObject._debug(" - wrong type: expected %r, got %r", schedule_datatype, time_value.__class__, )
                        raise TypeError("wrong type")
                    elif 255 in time_value.time:
                        # 255 is the BACnet "any" wildcard in a time field.
                        if _debug:
                            LocalScheduleObject._debug(" - wildcard in time")
                        raise ValueError("must be a specific time")
        # check the exception schedule values
        if self.exceptionSchedule:
            for special_event in self.exceptionSchedule:
                for time_value in special_event.listOfTimeValues:
                    if _debug:
                        LocalScheduleObject._debug(" - special event time_value: %r", time_value)
                    if time_value is None:
                        pass
                    elif not isinstance(time_value.value, (Null, schedule_datatype)):
                        if _debug:
                            LocalScheduleObject._debug(" - wrong type: expected %r, got %r", schedule_datatype, time_value.__class__, )
                        raise TypeError("wrong type")
        # check list of object property references
        obj_prop_refs = self.listOfObjectPropertyReferences
        if obj_prop_refs:
            for obj_prop_ref in obj_prop_refs:
                # only references within this device are supported
                if obj_prop_ref.deviceIdentifier:
                    raise RuntimeError("no external references")
                # get the datatype of the property to be written
                obj_type = obj_prop_ref.objectIdentifier[0]
                datatype = get_datatype(obj_type, obj_prop_ref.propertyIdentifier)
                if _debug:
                    LocalScheduleObject._debug(" - datatype: %r", datatype)
                if issubclass(datatype, Array) and (obj_prop_ref.propertyArrayIndex is not None):
                    # index 0 addresses the (Unsigned) array length
                    if obj_prop_ref.propertyArrayIndex == 0:
                        datatype = Unsigned
                    else:
                        datatype = datatype.subtype
                    if _debug:
                        LocalScheduleObject._debug(" - datatype: %r", datatype)
                if datatype is not schedule_datatype:
                    if _debug:
                        LocalScheduleObject._debug(" - wrong type: expected %r, got %r", datatype, schedule_datatype, )
                    raise TypeError("wrong type")
        # all good
        self.reliability = 'noFaultDetected'
        if _debug:
            LocalScheduleObject._debug(" - no fault detected")
    except Exception as err:
        if _debug:
            LocalScheduleObject._debug(" - exception: %r", err)
        self.reliability = 'configurationError'
def replace_by_etree(self, root_el, el_idx=0):
    """Replace element.

    Select the ``el_idx``-th element having the same name as ``root_el``,
    then replace the selected element with ``root_el`` -- children and
    attributes included.  ``root_el`` can be a single element or the root
    of an element tree.

    Args:
        root_el: element
            New element that will replace the existing element.
    """
    target = self.get_element_by_name(root_el.tag, el_idx)
    # Swap in the new children and attributes in place.
    target[:] = list(root_el)
    target.attrib = root_el.attrib
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.