signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def terminate(self):
    """Mark this task Do-Not-Resuscitate and begin shutting it down.

    Called when an existing task is removed from the configuration.
    Records the DNR timestamp, then initiates the stop sequence; once
    all processes have stopped, the task will delete itself.
    """
    log = self._params.get('log', self._discard)
    self._dnr = time.time()
    self.stop()
    log.info("Task '%s' marked for death", self._name)
def reachability_latency(tnet=None, paths=None, rratio=1, calc='global'):
    """Reachability latency: the time taken for the r-th longest temporal path.

    Parameters
    ----------
    tnet : array or dict, optional
        A network (graphlet or contact), binary undirected only.
        Mutually exclusive with *paths*.
    paths : DataFrame, optional
        A paths dictionary (output of teneto.networkmeasure.shortest_temporal_path).
    rratio : float (default: 1)
        Reachability ratio that the latency is calculated in relation to.
        Value must be over 0 and up to 1. 1 (default) means all nodes must be
        reached; e.g. 0.5 implies 50% of nodes are reached (rounded to the
        nearest node).
    calc : str
        What to calculate. Alternatives: 'global' (entire network);
        'nodes' (for each node).

    Returns
    -------
    reach_lat : array
        Reachability latency.

    Raises
    ------
    ValueError
        If both or neither of *tnet* and *paths* are given.
    """
    if tnet is not None and paths is not None:
        raise ValueError('Only network or path input allowed.')
    if tnet is None and paths is None:
        raise ValueError('No input.')
    # If shortest paths are not calculated, calculate them.
    if tnet is not None:
        paths = shortest_temporal_path(tnet)
    n_nodes = paths[['from', 'to']].max().max() + 1
    n_time = paths[['t_start']].max().max() + 1
    pathmat = np.zeros([n_nodes, n_nodes, n_time]) * np.nan
    pathmat[paths['from'].values, paths['to'].values,
            paths['t_start'].values] = paths['temporal-distance']
    netshape = pathmat.shape
    # BUG FIX: np.round returns a float, which is not a valid array index on
    # modern NumPy -- cast to int before using it to index paths_sort below.
    edges_to_reach = int(netshape[0] - np.round(netshape[0] * rratio))
    reach_lat = np.zeros([netshape[1], netshape[2]]) * np.nan
    for t_ind in range(0, netshape[2]):
        # Sort each row descending; NaNs sort to the end.
        paths_sort = -np.sort(-pathmat[:, :, t_ind], axis=1)
        reach_lat[:, t_ind] = paths_sort[:, edges_to_reach]
    if calc == 'global':
        reach_lat = np.nansum(reach_lat)
        reach_lat = reach_lat / ((netshape[0]) * netshape[2])
    elif calc == 'nodes':
        reach_lat = np.nansum(reach_lat, axis=1)
        reach_lat = reach_lat / (netshape[2])
    return reach_lat
def read_rle(file_obj, header, bit_width, debug_logging):
    """Read a run-length encoded run from *file_obj*.

    The repeat count comes from the header; *bit_width* determines how many
    bytes hold the repeated value. Yields the value, *count* times.
    """
    count = header >> 1
    width = (bit_width + 7) // 8
    raw = file_obj.read(width)
    # Zero-pad to 4 bytes so it can be unpacked as a little-endian int32.
    raw = raw + b"\x00\x00\x00\x00"[len(raw):]
    (value,) = struct.unpack(b"<i", raw)
    if debug_logging:
        logger.debug("Read RLE group with value %s of byte-width %s and count %s",
                     value, width, count)
    for _ in range(count):
        yield value
def _GetMessage ( self , message_file_key , lcid , message_identifier ) :
"""Retrieves a specific message from a specific message table .
Args :
message _ file _ key ( int ) : message file key .
lcid ( int ) : language code identifier ( LCID ) .
message _ identifier ( int ) : message identifier .
Returns :
str : message string or None if not available .
Raises :
RuntimeError : if more than one value is found in the database .""" | table_name = 'message_table_{0:d}_0x{1:08x}' . format ( message_file_key , lcid )
has_table = self . _database_file . HasTable ( table_name )
if not has_table :
return None
column_names = [ 'message_string' ]
condition = 'message_identifier == "0x{0:08x}"' . format ( message_identifier )
values = list ( self . _database_file . GetValues ( [ table_name ] , column_names , condition ) )
number_of_values = len ( values )
if number_of_values == 0 :
return None
if number_of_values == 1 :
return values [ 0 ] [ 'message_string' ]
raise RuntimeError ( 'More than one value found in database.' ) |
def _get_storage_manager ( resource ) :
"""Return a storage manager which can process this resource .""" | for manager in ( AmazonS3 , ArvadosKeep , SevenBridges , DNAnexus , AzureBlob , GoogleCloud , RegularServer ) :
if manager . check_resource ( resource ) :
return manager ( )
raise ValueError ( "Unexpected object store %(resource)s" % { "resource" : resource } ) |
def parse_expression(val, acceptable_types, name=None, raise_type=ValueError):
    """Attempt to parse the given `val` as a python expression of the
    specified `acceptable_types`.

    :param string val: A string containing a python expression.
    :param acceptable_types: The acceptable types of the parsed object.
    :type acceptable_types: type | tuple of types. The tuple may be nested;
      i.e. anything `isinstance` accepts.
    :param string name: An optional logical name for the value being parsed;
      e.g. if the literal val represents a person's age, 'age'.
    :param type raise_type: The type of exception to raise for all failures;
      ValueError by default.
    :raises: If `val` is not a valid python literal expression, or it is but
      evaluates to an object that is not an instance of one of the
      `acceptable_types`.
    """
    def format_type(typ):
        return typ.__name__

    if not isinstance(val, string_types):
        raise raise_type('The raw `val` is not a string. Given {} of type {}.'
                         .format(val, format_type(type(val))))

    def get_name():
        return repr(name) if name else 'value'

    def format_raw_value():
        # Prefix each line of the raw value with a right-aligned 1-based
        # line number for readable error messages.
        lines = val.splitlines()
        width = len(str(len(lines)))
        numbered = ["{line_number:{width}}: {line}".format(
                        line_number=i, width=width, line=text)
                    for i, text in enumerate(lines, start=1)]
        return '\n'.join(numbered)

    try:
        # SECURITY NOTE: eval() executes arbitrary code -- only ever feed
        # trusted input through this function.
        parsed_value = eval(val)
    except Exception as e:
        raise raise_type(dedent("""\
        The {name} cannot be evaluated as a literal expression: {error}
        Given raw value:
        {value}
        """.format(name=get_name(), error=e, value=format_raw_value())))

    if not isinstance(parsed_value, acceptable_types):
        def iter_types(types):
            # Flatten an arbitrarily nested tuple of types.
            if isinstance(types, type):
                yield types
            elif isinstance(types, tuple):
                for item in types:
                    for typ in iter_types(item):
                        yield typ
            else:
                raise ValueError('The given acceptable_types is not a valid type (tuple): {}'
                                 .format(acceptable_types))
        raise raise_type(dedent("""\
        The {name} is not of the expected type(s): {types}:
        Given the following raw value that evaluated to type {type}:
        {value}
        """.format(name=get_name(),
                   types=', '.join(format_type(t) for t in iter_types(acceptable_types)),
                   type=format_type(type(parsed_value)),
                   value=format_raw_value())))
    return parsed_value
def remove_duplicates_from_list(array):
    """Return a copy of *array* with duplicates removed.

    Preserves the order of elements in the list: the first occurrence of
    each element is kept.
    """
    seen = set()
    result = []
    for item in array:
        if item not in seen:
            seen.add(item)
            result.append(item)
    return result
def efficiency_capacity_demand_difference(slots, events, X, **kwargs):
    """Calculate the total difference between demand for each event and the
    capacity of the slot it is scheduled in, weighted by the assignment
    matrix ``X[row, col]``.
    """
    return sum(
        (event.demand - slot.capacity) * X[row, col]
        for row, event in enumerate(events)
        for col, slot in enumerate(slots)
    )
def _find_references(model_name, references=None):
    """Iterate over model references for `model_name` and return a list of
    parent model specifications (including that of `model_name` itself),
    ordered from parent to child.
    """
    references = references or []
    references.append(model_name)
    parent = MODELS[model_name].get('reference')
    if parent is not None:
        # Recurse; the shared `references` list accumulates the chain.
        _find_references(parent, references)
    return list(reversed(references))
def obj_to_grid(self, file_path=None, delim=None, tab=None,
                quote_numbers=True, quote_empty_str=False):
    """Return a str of a grid table.

    :param file_path: path to data file; defaults to self's contents if
        left alone
    :param delim: dict of delimiters; missing entries default to self.FANCY
    :param tab: string offset prepended to every line of the table
    :param quote_numbers: bool, if True will quote numbers that are strings
    :param quote_empty_str: bool, if True will quote empty strings
    :return: string representing the grid formation of the relevant data
    """
    div_delims = {
        "top": ['top left corner', 'top intersect', 'top edge', 'top right corner'],
        "divide": ['left major intersect', 'internal major intersect', 'bottom edge', 'right major intersect'],
        "middle": ['left intersect', 'internal intersect', 'internal horizontal edge', 'right intersect'],
        "bottom": ['bottom left intersect', 'bottom intersect', 'bottom edge', 'bottom right corner'],
    }
    delim = delim if delim else {}
    # Fill any missing delimiters from the FANCY defaults.
    for tag in self.FANCY.keys():
        delim.setdefault(tag, self.FANCY[tag])
    tab = self.tab if tab is None else tab
    list_of_list, column_widths = self.get_data_and_shared_column_widths(
        data_kwargs=dict(quote_numbers=quote_numbers, quote_empty_str=quote_empty_str),
        width_kwargs=dict(padding=0, pad_last_column=True))
    rows = [[cell.ljust(column_widths[i]) for i, cell in enumerate(row)]
            for row in list_of_list]
    # Build the four kinds of horizontal rule used by the grid.
    grid_row = {}
    for key, (left, cross, edge, right) in div_delims.items():
        grid_row[key] = (delim[left]
                         + delim[cross].join(delim[edge] * width for width in column_widths)
                         + delim[right])
    rows = [delim['left edge'] + delim['internal vertical edge'].join(row) + delim['right edge']
            for row in rows]
    header = [grid_row["top"], rows[0], grid_row["divide"]]
    # Interleave body rows with 'middle' rules, dropping the trailing rule.
    body = [item for row in rows[1:] for item in (row, grid_row["middle"])][:-1]
    out = header + body + [grid_row["bottom"]]
    out = tab + (u'\n' + tab).join(out)
    self._save_file(file_path, out)
    return out
def read_json(path, default=None, fatal=True, logger=None):
    """Deserialize JSON data from a file.

    :param str|None path: Path to file to deserialize
    :param dict|list|None default: Default if file is not present, or if it's not json
    :param bool|None fatal: Abort execution on failure if True
    :param callable|None logger: Logger to use
    :return dict|list: Deserialized data from file
    """
    path = resolved_path(path)
    if not path or not os.path.exists(path):
        if default is None:
            return abort("No file %s", short(path), fatal=(fatal, default))
        return default
    try:
        with io.open(path, "rt") as fh:
            data = json.load(fh)
        # If a default was given, the deserialized data must match its type.
        if default is not None and type(data) != type(default):
            return abort("Wrong type %s for %s, expecting %s",
                         type(data), short(path), type(default),
                         fatal=(fatal, default))
        if logger:
            logger("Read %s", short(path))
        return data
    except Exception as e:
        return abort("Couldn't read %s: %s", short(path), e, fatal=(fatal, default))
def send_packet(self, packet, protocol='json', time_precision=None):
    """Send a UDP packet.

    :param packet: the packet to be sent
    :type packet: dict (if protocol is 'json') or
        list of line protocol strings (if protocol is 'line')
    :param protocol: protocol of input data, either 'json' or 'line'
    :type protocol: str
    :param time_precision: Either 's', 'm', 'ms' or 'u', defaults to None
    :type time_precision: str
    :raises ValueError: if *protocol* is neither 'json' nor 'line'
    """
    if protocol == 'json':
        data = make_lines(packet, time_precision).encode('utf-8')
    elif protocol == 'line':
        data = ('\n'.join(packet) + '\n').encode('utf-8')
    else:
        # BUG FIX: an unknown protocol previously fell through and raised an
        # obscure NameError on the unbound `data`; fail fast and clearly.
        raise ValueError("Invalid protocol: {}".format(protocol))
    self.udp_socket.sendto(data, (self._host, self._udp_port))
def offset_gen(offset, iterable, skip_signal=None):
    """A generator that skips the first `offset` elements of `iterable` and
    yields the rest.

    If `skip_signal` is a callable, it is invoked with every skipped element.
    """
    remaining = int(offset)
    assert remaining >= 0, 'negative offset'
    notify = skip_signal if callable(skip_signal) else None
    for element in iterable:
        if remaining > 0:
            remaining -= 1
            if notify is not None:
                notify(element)
        else:
            yield element
def monte_carlo_vol(self, ctrs, ndraws=10000, rstate=None,
                    return_overlap=False, kdtree=None):
    """Estimate, via `ndraws` Monte Carlo draws, the volume of the *union*
    of cubes.

    If `return_overlap=True`, also returns the estimated fractional overlap
    with the unit cube. Uses a K-D Tree to perform the search if provided.
    """
    if rstate is None:
        rstate = np.random
    # Monte Carlo integration: each sample returns (point, multiplicity q).
    samples = [self.sample(ctrs, rstate=rstate, return_q=True, kdtree=kdtree)
               for _ in range(ndraws)]
    qsum = sum(q for (x, q) in samples)
    vol = 1. * ndraws / qsum * len(ctrs) * self.vol_cube
    if not return_overlap:
        return vol
    # Estimate the fractional overlap with the unit cube from the same draws.
    qin = sum(q * unitcheck(x) for (x, q) in samples)
    overlap = 1. * qin / qsum
    return vol, overlap
def _percentile ( self , values , percent , key = lambda x : x ) :
"""Find the percentile of a list of values .
Args :
values : A list of values for which percentiles are desired
percent : A float value from 0 to 100 representing the requested percentile .
key : optional key function to compute value from each element of N .
Return :
The percentile of the values""" | vals = sorted ( values )
k = ( len ( vals ) - 1 ) * ( percent / 100 )
f = math . floor ( k )
c = math . ceil ( k )
if f == c :
return key ( vals [ int ( k ) ] )
d0 = key ( vals [ int ( f ) ] ) * ( c - k )
d1 = key ( vals [ int ( c ) ] ) * ( k - f )
return d0 + d1 |
def init_encoders(self, config=DataConfig()):
    """Initialize the integer encoder and the one-hot encoder, fitting them
    to the vocabulary of the corpus.

    NB: from here on out,
      - 'ie' stands for 'integer encoded', and
      - 'ohe' stands for 'one-hot encoded'.
    """
    self.log('info', 'Initializing the encoders ...')
    # Create the integer encoder and fit it to the corpus vocabulary.
    self.ie = LabelEncoder()
    self.ie_vocab = self.ie.fit_transform(self.vocab_list)
    self.pad_u_index = self.ie.transform([self.pad_u])[0]
    # Create the one-hot encoder and fit it to the integer-encoded vocab
    # (reshaped to a column vector, as the encoder expects 2-D input).
    self.ohe = OneHotEncoder(sparse=False)
    self.ohe_vocab = self.ohe.fit_transform(
        self.ie_vocab.reshape(len(self.ie_vocab), 1))
    return
def xcompile(source_code, args=0, optimize=True):
    """Parse Crianza source code and return a native Python function.

    Args:
        args: The resulting function's number of input parameters.
        optimize: Whether to optimize the intermediate bytecode.

    Returns:
        A callable Python function.
    """
    bytecode = crianza.compile(crianza.parse(source_code), optimize=optimize)
    return crianza.native.compile(bytecode, args=args)
def split_lines(tokenlist):
    """Take a single list of (Token, text) tuples and yield one such list
    for each line. Just like str.split, this will yield at least one item.

    :param tokenlist: List of (token, text) or (token, text, mouse_handler)
        tuples.
    """
    line = []
    for item in tokenlist:
        # IMPROVEMENT: the 2-tuple and 3-tuple cases were copy/pasted
        # branches; handle both uniformly by carrying the tuple tail
        # (empty, or the mouse handler) through unchanged.
        token, string = item[0], item[1]
        extra = item[2:]
        parts = string.split('\n')
        for part in parts[:-1]:
            if part:
                line.append((token, part) + extra)
            yield line
            line = []
        # parts[-1] may be '', and that's fine -- it happens in the case of
        # [(Token.SetCursorPosition, '')].
        line.append((token, parts[-1]) + extra)
    # Always yield the last line, even when it is empty. This ensures that
    # when `tokenlist` ends with a newline character, an additional empty
    # line is yielded (otherwise there's no way to differentiate between
    # the cases where `tokenlist` does and doesn't end with a newline).
    yield line
def _get_elements(mol, label):
    """Get the elements of the atoms in the specified order.

    Args:
        mol: The molecule. OpenBabel OBMol object.
        label: The atom indices. List of integers.

    Returns:
        Elements (atomic numbers). List of integers.
    """
    return [int(mol.GetAtom(index).GetAtomicNum()) for index in label]
def run_job(self, job, backend='simulator', shots=1, max_credits=None,
            seed=None, hub=None, group=None, project=None, hpc=None,
            access_token=None, user_id=None):
    """Execute a job on the given backend.

    *job* may be a list/tuple of QASM dicts (legacy payload) or a single
    Qobj dict. Returns the server response, or an ``{"error": ...}`` dict
    on validation failure.

    :raises BadBackendError: if *backend* does not resolve to a known type.
    """
    if access_token:
        self.req.credential.set_token(access_token)
    if user_id:
        self.req.credential.set_user_id(user_id)
    if not self.check_credentials():
        return {"error": "Not credentials valid"}
    backend_type = self._check_backend(backend, 'job')
    if not backend_type:
        raise BadBackendError(backend)
    if isinstance(job, (list, tuple)):
        # Legacy QASM payload: strip version headers from each circuit.
        qasms = job
        for qasm in qasms:
            qasm['qasm'] = (qasm['qasm']
                            .replace('IBMQASM 2.0;', '')
                            .replace('OPENQASM 2.0;', ''))
        data = {'qasms': qasms, 'shots': shots, 'backend': {}}
        if max_credits:
            data['maxCredits'] = max_credits
        # A seed is only accepted when it is a number of at most 10 digits.
        if seed and len(str(seed)) < 11 and str(seed).isdigit():
            data['seed'] = seed
        elif seed:
            return {"error": "Not seed allowed. Max 10 digits."}
        data['backend']['name'] = backend_type
    elif isinstance(job, dict):
        # Qobj payload.
        data = {'qObject': job, 'backend': {}}
        data['backend']['name'] = backend_type
    else:
        return {"error": "Not a valid data to send"}
    if hpc:
        data['hpc'] = hpc
    url = get_job_url(self.config, hub, group, project)
    return self.req.post(url, data=json.dumps(data))
def remove_reftrack(self, reftrack):
    """Remove the reftrack from the root.

    This will not handle row deletion in the model! That is done
    automatically when calling :meth:`Reftrack.delete`.

    :param reftrack: the reftrack object to remove
    :type reftrack: :class:`Reftrack`
    :returns: None
    :rtype: None
    :raises: None
    """
    self._reftracks.remove(reftrack)
    refobj = reftrack.get_refobj()
    if refobj:
        # Drop the parent-search entry for this refobj, if present.
        self._parentsearchdict.pop(refobj, None)
def _update_limits_from_api(self):
    """Query the ELB and ELBv2 DescribeAccountLimits API actions, and update
    the quotas returned into ``self.limits``.
    """
    def apply_api_limits(attribs, name_to_limits):
        # IMPROVEMENT: this loop was duplicated verbatim for the classic-ELB
        # and ALB/NLB responses; factored into one helper.
        for attrib in attribs['Limits']:
            if int(attrib.get('Max', 0)) == 0:
                # NOTE(review): a reported Max of 0 is skipped -- presumably
                # it means "no limit returned"; confirm against the ELB API.
                continue
            name = attrib.get('Name', 'unknown')
            if name not in name_to_limits:
                continue
            self.limits[name_to_limits[name]]._set_api_limit(int(attrib['Max']))

    self.connect()
    logger.debug("Querying ELB DescribeAccountLimits for limits")
    apply_api_limits(self.conn.describe_account_limits(), {
        'classic-load-balancers': 'Active load balancers',
        'classic-listeners': 'Listeners per load balancer',
        'classic-registered-instances': 'Registered instances per load balancer',
    })
    # Connect to the ELBv2 API as well; ALB/NLB limits live behind it.
    self.conn2 = client('elbv2', **self._boto3_connection_kwargs)
    logger.debug("Connected to %s in region %s", 'elbv2',
                 self.conn2._client_config.region_name)
    logger.debug("Querying ELBv2 (ALB) DescribeAccountLimits for limits")
    apply_api_limits(self.conn2.describe_account_limits(), {
        'target-groups': 'Target groups',
        'listeners-per-application-load-balancer': 'Listeners per application load balancer',
        'rules-per-application-load-balancer': 'Rules per application load balancer',
        'network-load-balancers': 'Network load balancers',
        'listeners-per-network-load-balancer': 'Listeners per network load balancer',
    })
    logger.debug("Done setting limits from API")
def markowitz(I, sigma, r, alpha):
    """markowitz -- simple Markowitz model for portfolio optimization.

    Parameters:
        - I: set of items
        - sigma[i]: standard deviation of item i
        - r[i]: revenue of item i
        - alpha: acceptance threshold

    Returns a model, ready to be solved.
    """
    model = Model("markowitz")
    # x[i]: quantity of item i to buy.
    x = {i: model.addVar(vtype="C", name="x(%s)" % i) for i in I}
    model.addCons(quicksum(r[i] * x[i] for i in I) >= alpha)
    model.addCons(quicksum(x[i] for i in I) == 1)
    # SCIP only allows linear objectives, so minimize an auxiliary variable
    # bounded below by the (quadratic) portfolio variance.
    obj = model.addVar(vtype="C", name="objective", lb=None, ub=None)
    model.addCons(quicksum(sigma[i] ** 2 * x[i] * x[i] for i in I) <= obj)
    model.setObjective(obj, "minimize")
    model.data = x
    return model
def service(ctx):
    """Install systemd service configuration."""
    options = ctx.obj
    install_service(options['instance'], options['dbhost'],
                    options['dbname'], options['port'])
def Rz_to_lambdanu(R, z, ac=5., Delta=1.):
    """NAME:
        Rz_to_lambdanu
    PURPOSE:
        calculate the prolate spheroidal coordinates (lambda, nu) from
        galactocentric cylindrical coordinates (R, z) by solving
        eq. (2.2) in Dejonghe & de Zeeuw (1988a) for (lambda, nu):
            R^2 = (l+a)*(n+a)/(a-g)
            z^2 = (l+g)*(n+g)/(g-a)
            Delta^2 = g-a
    INPUT:
        R     - Galactocentric cylindrical radius
        z     - vertical height
        ac    - axis ratio of the coordinate surfaces
                (a/c) = sqrt(-a)/sqrt(-g) (default: 5.)
        Delta - focal distance that defines the spheroidal coordinate
                system (default: 1.); Delta = sqrt(g-a)
    OUTPUT:
        (lambda, nu)
    HISTORY:
        2015-02-13 - Written - Trick (MPIA)
    """
    g = Delta ** 2 / (1. - ac ** 2)
    a = g - Delta ** 2
    term = R ** 2 + z ** 2 - a - g
    discr = (R ** 2 + z ** 2 - Delta ** 2) ** 2 + (4. * Delta ** 2 * R ** 2)
    l = 0.5 * (term + nu.sqrt(discr))
    n = 0.5 * (term - nu.sqrt(discr))
    # Special-case z == 0 where the closed-form expressions apply exactly.
    if isinstance(z, float) and z == 0.:
        l = R ** 2 - a
        n = -g
    elif isinstance(z, nu.ndarray) and nu.sum(z == 0.) > 0:
        if isinstance(R, float):
            l[z == 0.] = R ** 2 - a
        # CONSISTENCY FIX: this check used the `sc.` (scipy) alias while the
        # rest of the function uses `nu.`; both name the same ndarray type.
        if isinstance(R, nu.ndarray):
            l[z == 0.] = R[z == 0.] ** 2 - a
        n[z == 0.] = -g
    return (l, n)
def restore(self, **kwargs):
    """Restore this version as a new version and return that new version.

    If a current version already exists, it is terminated before this
    version is restored.

    Relations (foreign key, reverse foreign key, many-to-many) are not
    restored with the old version. If provided in kwargs, (Versioned)
    ForeignKey fields will be set to the provided values. When passing an
    id for a (Versioned) ForeignKey, use the field.attname, e.g.::

        restore(team_id=myteam.pk)

    When passing an object, simply use the field name, e.g.::

        restore(team=myteam)

    If a (Versioned) ForeignKey is not nullable and no value is provided
    for it in kwargs, a ForeignKeyRequiresValueError is raised.

    :param kwargs: arguments used to initialize the class instance
    :return: Versionable
    """
    if not self.pk:
        raise ValueError('Instance must be saved and terminated before it can be '
                         'restored.')
    if self.is_current:
        raise ValueError('This is the current version, no need to restore it.')
    if self.get_deferred_fields():
        # Restoring would require re-fetching the record (or copying the
        # object after fetching the missing fields), which could surprise
        # the caller -- so refuse instead.
        raise ValueError('Can not restore a model instance that has deferred fields')
    cls = self.__class__
    now = get_utc_now()
    restored = copy.copy(self)
    restored.version_end_date = None
    restored.version_start_date = now
    fields = [f for f in cls._meta.local_fields
              if f.name not in Versionable.VERSIONABLE_FIELDS]
    for field in fields:
        if field.attname in kwargs:
            # Fake a related object to avoid a DB roundtrip. (Assigning to
            # the field's attname stopped working with Django 2.0.)
            obj = field.remote_field.model(id=kwargs[field.attname])
            setattr(restored, field.name, obj)
        elif field.name in kwargs:
            setattr(restored, field.name, kwargs[field.name])
        elif isinstance(field, ForeignKey):
            # Set all non-provided ForeignKeys to None; if the field is
            # required, raise. (The null-assignment check for non-null
            # foreign keys was removed in Django 1.10, hence the manual
            # ValueError below.)
            try:
                setattr(restored, field.name, None)
                if not field.null:
                    raise ValueError
            except ValueError:
                raise ForeignKeyRequiresValueError
    self.id = self.uuid()
    with transaction.atomic():
        # If this is not the latest version, terminate the latest version.
        latest = cls.objects.current_version(self, check_db=True)
        if latest and latest != self:
            latest.delete()
            restored.version_start_date = latest.version_end_date
        self.save()
        restored.save()
        # Update ManyToMany relations to point at the old version's id
        # instead of the restored version's id.
        for field_name in self.get_all_m2m_field_names():
            manager = getattr(restored, field_name)  # VersionedRelatedManager
            manager.through.objects.filter(
                **{manager.source_field.attname: restored.id}
            ).update(**{manager.source_field_name: self})
    return restored
def process(specs):
    """Execute the passed-in list of specs."""
    pout, pin = chain_specs(specs)
    LOG.info("Processing")
    stopwatch = StopWatch().start()
    result = pout.process(pin)
    if result:
        print(result)
    LOG.info("Finished in %s", stopwatch.read())
def ripple_withdrawal(self, amount, address, currency):
    """Request a Ripple withdrawal. Returns True if successful."""
    payload = {'amount': amount, 'address': address, 'currency': currency}
    response = self._post("ripple_withdrawal/", data=payload, return_json=True)
    return self._expect_true(response)
def get_streams(self, game=None, channels=None, limit=25, offset=0):
    """Return a list of streams queried by a number of parameters, sorted
    by number of viewers descending.

    :param game: the game or name of the game
    :type game: :class:`str` | :class:`models.Game`
    :param channels: list of models.Channels or channel names (can be mixed)
    :type channels: :class:`list` of :class:`models.Channel` or :class:`str`
    :param limit: maximum number of results
    :type limit: :class:`int`
    :param offset: offset for pagination
    :type offset: :class:`int`
    :returns: A list of streams
    :rtype: :class:`list` of :class:`models.Stream`
    :raises: None
    """
    if isinstance(game, models.Game):
        game = game.name
    cparam = None
    if channels:
        # Normalise Channel objects to their names, then join for the query.
        names = [c.name if isinstance(c, models.Channel) else c for c in channels]
        cparam = ','.join(names)
    params = {'limit': limit, 'offset': offset, 'game': game, 'channel': cparam}
    response = self.kraken_request('GET', 'streams', params=params)
    return models.Stream.wrap_search(response)
def convert_spanstring(span_string):
    """Convert a span of tokens (str, e.g. 'word_88..word_91') into a list
    of token IDs (e.g. ['word_88', 'word_89', 'word_90', 'word_91']).

    Note: please don't use this function directly; use spanstring2tokens()
    instead, which checks for non-existing tokens!

    Examples
    --------
    >>> convert_spanstring('word_1')
    ['word_1']
    >>> convert_spanstring('word_2,word_3')
    ['word_2', 'word_3']
    >>> convert_spanstring('word_7..word_11')
    ['word_7', 'word_8', 'word_9', 'word_10', 'word_11']
    >>> convert_spanstring('word_7..word_9,word_15,word_17..word_19')
    ['word_7', 'word_8', 'word_9', 'word_15', 'word_17', 'word_18', 'word_19']
    """
    prefix_err = "All tokens must share the same prefix: {0} vs. {1}"
    tokens = []
    if not span_string:
        return tokens
    for span in span_string.split(','):
        endpoints = span.split('..')
        if len(endpoints) == 1:
            # A single token, e.g. 'word_15'.
            tokens.append(endpoints[0])
        elif len(endpoints) == 2:
            # An inclusive range, e.g. 'word_7..word_11'.
            start, end = endpoints
            start_prefix, start_id = start.split('_')
            end_prefix, end_id = end.split('_')
            assert start_prefix == end_prefix, prefix_err.format(start_prefix, end_prefix)
            tokens.extend("{0}_{1}".format(start_prefix, token_id)
                          for token_id in range(int(start_id), int(end_id) + 1))
        else:
            raise ValueError("Can't parse span '{}'".format(span_string))
    # Validate that every token follows prefix_number and shares one prefix.
    first_prefix = tokens[0].split('_')[0]
    for token in tokens:
        parts = token.split('_')
        assert len(parts) == 2, "All token IDs must use the format prefix + '_' + number"
        assert parts[0] == first_prefix, prefix_err.format(parts[0], first_prefix)
    return tokens
def buildDiscover(base_url, out_dir):
    """Convert all discovery test templates to apache mod_asis files in
    *out_dir*, and write a manifest.txt listing input/id/result URLs.
    """
    test_data = discoverdata.readTests(discoverdata.default_test_file)

    def writeTestFile(test_name):
        # Render the template for this test and write it into out_dir.
        template = test_data[test_name]
        data = discoverdata.fillTemplate(test_name, template, base_url,
                                         discoverdata.example_xrds)
        out_file_name = os.path.join(out_dir, test_name)
        # BUG FIX: the old code used the Python-2-only file() builtin and
        # never closed the handle, leaking a file descriptor per test.
        with open(out_file_name, 'w') as out_file:
            out_file.write(data)

    manifest = [manifest_header]
    for success, input_name, id_name, result_name in discoverdata.testlist:
        if not success:
            continue
        writeTestFile(input_name)
        input_url = urlparse.urljoin(base_url, input_name)
        id_url = urlparse.urljoin(base_url, id_name)
        result_url = urlparse.urljoin(base_url, result_name)
        manifest.append('\t'.join((input_url, id_url, result_url)))
        manifest.append('\n')
    manifest_file_name = os.path.join(out_dir, 'manifest.txt')
    with open(manifest_file_name, 'w') as manifest_file:
        for chunk in manifest:
            manifest_file.write(chunk)
def revert_file(self, file=None):
    """Revert either the given file or the current
    **Script_Editor_tabWidget** Widget tab Model editor file.

    :param file: File to revert.
    :type file: unicode
    :return: Method success.
    :rtype: bool
    """
    editor = file and self.get_editor(file) or self.get_current_editor()
    if not editor:
        return False
    file = editor.file
    LOGGER.info("{0} | Reverting '{1}' file!".format(self.__class__.__name__, file))
    # BUG FIX: always return a bool; previously a failed reload fell off the
    # end and returned None despite the documented ``:rtype: bool``.
    return bool(self.reload_file(file, is_modified=False))
def _bind_device(self):
    """This method implements ``_bind_device`` from
    :class:`~lewis.core.devices.InterfaceBase`.

    It binds Cmd and Var definitions to implementations in Interface and Device.
    """
    seen_patterns = set()
    self.bound_commands = []

    for command in self.commands:
        # Prefer a binding on the interface, then on the device.
        binding = command.bind(self) or command.bind(self.device) or None

        if binding is None:
            raise RuntimeError(
                'Unable to produce callable object for non-existing member \'{}\' '
                'of device or interface.'.format(command.func))

        for bound in binding:
            regex = bound.matcher.pattern

            # Each regular expression may only be claimed by one command.
            if regex in seen_patterns:
                raise RuntimeError(
                    'The regular expression {} is '
                    'associated with multiple commands.'.format(regex))

            seen_patterns.add(regex)
            self.bound_commands.append(bound)
def display_map(fname):
    """view a text file (map) in high resolution"""
    print("viewing ", fname)
    viewer = view_tk(None)
    viewer.show_grid_from_file(fname)
    viewer.title('Map View')
    # Hook up mouse and keyboard handlers for interactivity.
    viewer.canvas.bind("<Button-1>", callback)
    viewer.bind("<Key>", key)
    viewer.mainloop()
def read_lines_from_file(cls_name, filename):
    """Read lines from file, parsing out header and metadata."""
    with tf.io.gfile.GFile(filename, "rb") as handle:
        # Strip the trailing newline character from every line.
        content = [tf.compat.as_text(raw)[:-1] for raw in handle]
    expected_header = "%s%s" % (_HEADER_PREFIX, cls_name)
    if content[0] != expected_header:
        raise ValueError(
            "File {fname} does not seem to have been created from "
            "{name}.save_to_file.".format(fname=filename, name=cls_name))
    # Second line carries a JSON metadata dict after its prefix.
    metadata_dict = json.loads(content[1][len(_METADATA_PREFIX):])
    return content[2:], metadata_dict
def filter(self, func):
    """Filter array along an axis.

    Applies a function which should evaluate to boolean,
    along a single axis or multiple axes. Array will be
    aligned so that the desired set of axes are in the
    keys, which may require a transpose/reshape.

    Parameters
    ----------
    func : function
        Function to apply, should return boolean
    """
    if self.mode == 'local':
        # Align so the base axes lead, then filter the records directly.
        reshaped = self._align(self.baseaxes)
        filtered = asarray(list(filter(func, reshaped)))
        if self.labels is not None:
            # Boolean mask over records; used below to subset the labels.
            mask = asarray(list(map(func, reshaped)))
    if self.mode == 'spark':
        # Sorting is only required when labels must stay aligned with values.
        sort = False if self.labels is None else True
        filtered = self.values.filter(func, axis=self.baseaxes, sort=sort)
        if self.labels is not None:
            # Evaluate func per record, collect (key, bool) pairs, and
            # reorder by key so the mask lines up with the label order.
            keys, vals = zip(*self.values.map(func, axis=self.baseaxes, value_shape=(1,)).tordd().collect())
            perm = sorted(range(len(keys)), key=keys.__getitem__)
            mask = asarray(vals)[perm]
    if self.labels is not None:
        # Labels are flattened over the base axes before masking.
        s1 = prod(self.baseshape)
        newlabels = self.labels.reshape(s1, 1)[mask].squeeze()
    else:
        newlabels = None
    # Labels are passed explicitly, so exclude them from finalize propagation.
    return self._constructor(filtered, labels=newlabels).__finalize__(self, noprop=('labels',))
def init_app(application):
    """Initialise an application.

    Set up whitenoise to handle static files.
    """
    # Pull only the whitenoise-related settings out of the app config.
    config = {key: value for key, value in application.config.items() if key in SCHEMA}
    kwargs = {'autorefresh': application.debug}
    # Strip the 11-character config-key prefix and lowercase the remainder
    # to obtain the whitenoise keyword argument names.
    kwargs.update((key[11:].lower(), value) for key, value in config.items())
    instance = whitenoise.WhiteNoise(application.wsgi_app, **kwargs)
    instance.add_files(application.static_folder, application.static_url_path)
    if not hasattr(application, 'extensions'):
        application.extensions = {}
    application.extensions['whitenoise'] = instance
    # Wrap the WSGI app so whitenoise serves static requests first.
    application.wsgi_app = instance
def key_for_property(cls, kind, property):
    """Return the __property__ key for property of kind.

    Args:
      kind: kind whose key is requested.
      property: property whose key is requested.

    Returns:
      The key for property of kind.
    """
    # Build the (kind, id) ancestor path and construct the key from it.
    path = (Kind.KIND_NAME, kind, Property.KIND_NAME, property)
    return model.Key(*path)
def format_list(self, at_char, user, list_name):
    '''Return formatted HTML for a list.'''
    # Anchor links to the list page; the visible text keeps the @-prefix.
    template = '<a href="https://twitter.com/%s/lists/%s">%s%s/%s</a>'
    return template % (user, list_name, at_char, user, list_name)
def merge(data, skip=50, fraction=1.0):
    """Merge one every 'skip' clouds into a single emcee population,
    using the later 'fraction' of the run."""
    n_walkers, n_steps, n_dim = data.chains.shape
    # Keep only the last `fraction` of the steps, thinned by `skip`.
    start = int((1.0 - fraction) * n_steps)
    total = int((n_steps - start) / skip)
    thinned = data.chains[:, start::skip, :]
    # Collapse walkers and kept steps into one flat population.
    return thinned.reshape((n_walkers * total, n_dim))
def find_user_emails(self, user):
    """Find all the UserEmail object belonging to a user."""
    # Delegate the lookup to the configured database adapter.
    return self.db_adapter.find_objects(self.UserEmailClass, user_id=user.id)
def remove_primary_analyses(self):
    """Remove analyses relocated to partitions"""
    for ar, analyses in self.analyses_to_remove.items():
        # De-duplicate the ids before asking the AR to delete them.
        unique_ids = list(set(map(api.get_id, analyses)))
        ar.manage_delObjects(unique_ids)
    # Everything handled; reset the pending-removal map.
    self.analyses_to_remove = dict()
def _format_name_map ( self , lonc , latc ) :
'''Return the name of the map in the good format''' | return '_' . join ( [ 'WAC' , 'GLOBAL' ] + [ 'E' + latc + lonc , "{0:0>3}" . format ( self . ppd ) + 'P' ] ) |
def _load_data ( data_file , data_type ) :
"""Load data from CSV , JSON , Excel , . . . , formats .""" | raw_data = data_file . read ( )
if data_type is None :
data_type = data_file . name . split ( '.' ) [ - 1 ]
# Data list to process
data = [ ]
# JSON type
if data_type == 'json' :
data = json . loads ( raw_data )
return data
# CSV type
elif data_type == 'csv' :
csv_data = StringIO ( raw_data )
reader = csv . DictReader ( csv_data , delimiter = ',' )
for line in reader :
data . append ( line )
return data
elif data_type in [ 'xlsx' , 'xlsm' , 'xltx' , 'xltm' ] :
excel_data = StringIO ( raw_data )
wb = openpyxl . load_workbook ( excel_data )
ws = wb . active
# First headers
headers = [ ]
for row in ws . iter_rows ( max_row = 1 ) :
for cell in row :
tmp = '_' . join ( cell . value . split ( " " ) ) . lower ( )
headers . append ( tmp )
# Simulate DictReader
for row in ws . iter_rows ( row_offset = 1 ) :
values = [ ]
for cell in row :
values . append ( cell . value )
tmp = dict ( itertools . izip ( headers , values ) )
if len ( values ) == len ( headers ) and not row_empty ( values ) :
data . append ( tmp )
return data
# PO type
elif data_type == 'po' :
po = polib . pofile ( raw_data )
for entry in po . untranslated_entries ( ) :
data . append ( entry . __dict__ )
return data
# PROPERTIES type ( used in Java and Firefox extensions )
elif data_type == 'properties' :
lines = raw_data . split ( '\n' )
for l in lines :
if l :
var_id , string = l . split ( '=' )
tmp = dict ( var_id = var_id , string = string )
data . append ( tmp )
return data
else :
return data |
def parse_form(self, req, name, field):
    """Pull a form value from the request.

    .. note::
        The request stream will be read and left at EOF.
    """
    # Parse the body at most once per request; reuse the cached form.
    form = self._cache.get("form")
    if form is None:
        form = parse_form_body(req)
        self._cache["form"] = form
    return core.get_value(form, name, field)
def get_moments(metricParams, vary_fmax=False, vary_density=None):
    """Calculate the various integrals ("moments") that are needed to compute
    the metric used in template bank placement and coincidence.

    Parameters
    -----------
    metricParams : metricParameters instance
        Structure holding all the options for construction of the metric.
    vary_fmax : boolean, optional (default False)
        If set to False the metric and rotations are calculated once, for the
        full range of frequency [f_low, f_upper).
        If set to True the metric and rotations are calculated multiple times,
        for frequency ranges [f_low, f_low + i*vary_density), where i starts
        at 1 and runs up until f_low + (i+1)*vary_density > f_upper.
        Thus values greater than f_upper are *not* computed.
        The calculation for the full range [f_low, f_upper) is also done.
    vary_density : float, optional
        If vary_fmax is True, this will be used in computing the frequency
        ranges as described for vary_fmax.

    Returns
    --------
    None
        **THIS FUNCTION RETURNS NOTHING**. The following will be **added**
        to the metricParams structure:

        metricParams.moments : Moments structure
            The result of all the integrals. It can be used for the ethinca
            components calculation, or other similar calculations. It is
            composed of two compound dictionaries: the first key indicates
            which moment is being calculated, the second key indicates the
            upper frequency cutoff that was used (see the description of the
            vary_fmax option above).

            In all cases x = f/f0. The first-key options are:

            moments['J%d' % i][f_cutoff]
                integral of x**((-i)/3.) * deltaX / PSD(x)
            moments['log%d' % i][f_cutoff]
                integral of log(x**(1./3.)) * x**((-i)/3.) * deltaX / PSD(x)
            moments['loglog%d' % i][f_cutoff]
                as above with log(x**(1./3.))**2
            moments['logloglog%d' % i][f_cutoff]
                as above with log(x**(1./3.))**3
            moments['loglogloglog%d' % i][f_cutoff]
                as above with log(x**(1./3.))**4

            All of these values are normalized by a factor of
            x**((-7)/3.) * deltaX / PSD(x). The normalization factor itself
            can be obtained in moments['I7'][f_cutoff].
    """
    # NOTE: Unless the TaylorR2F4 metric is used the log^3 and log^4 terms are
    # not needed. As this calculation is not too slow compared to bank
    # placement we just do this anyway.
    psd_amp = metricParams.psd.data
    # Reconstruct the frequency grid of the PSD from its sample spacing.
    psd_f = numpy.arange(len(psd_amp), dtype=float) * metricParams.deltaF
    new_f, new_amp = interpolate_psd(psd_f, psd_amp, metricParams.deltaF)
    # Need I7 first as this is the normalization factor
    funct = lambda x, f0: 1
    I7 = calculate_moment(new_f, new_amp, metricParams.fLow, metricParams.fUpper, metricParams.f0, funct, vary_fmax=vary_fmax, vary_density=vary_density)
    # Do all the J moments
    moments = {}
    moments['I7'] = I7
    for i in range(-7, 18):
        # funct is consumed immediately by calculate_moment within this
        # iteration, so the late binding of i in the lambda is harmless.
        funct = lambda x, f0: x ** ((-i + 7) / 3.)
        moments['J%d' % (i)] = calculate_moment(new_f, new_amp, metricParams.fLow, metricParams.fUpper, metricParams.f0, funct, norm=I7, vary_fmax=vary_fmax, vary_density=vary_density)
    # Do the logx multiplied by some power terms
    for i in range(-1, 18):
        funct = lambda x, f0: (numpy.log((x * f0) ** (1. / 3.))) * x ** ((-i + 7) / 3.)
        moments['log%d' % (i)] = calculate_moment(new_f, new_amp, metricParams.fLow, metricParams.fUpper, metricParams.f0, funct, norm=I7, vary_fmax=vary_fmax, vary_density=vary_density)
    # Do the (log)^2 term
    for i in range(-1, 18):
        funct = lambda x, f0: (numpy.log((x * f0) ** (1. / 3.))) ** 2 * x ** ((-i + 7) / 3.)
        moments['loglog%d' % (i)] = calculate_moment(new_f, new_amp, metricParams.fLow, metricParams.fUpper, metricParams.f0, funct, norm=I7, vary_fmax=vary_fmax, vary_density=vary_density)
    # Do the (log)^3 term
    for i in range(-1, 18):
        funct = lambda x, f0: (numpy.log((x * f0) ** (1. / 3.))) ** 3 * x ** ((-i + 7) / 3.)
        moments['logloglog%d' % (i)] = calculate_moment(new_f, new_amp, metricParams.fLow, metricParams.fUpper, metricParams.f0, funct, norm=I7, vary_fmax=vary_fmax, vary_density=vary_density)
    # Do the (log)^4 term
    for i in range(-1, 18):
        funct = lambda x, f0: (numpy.log((x * f0) ** (1. / 3.))) ** 4 * x ** ((-i + 7) / 3.)
        moments['loglogloglog%d' % (i)] = calculate_moment(new_f, new_amp, metricParams.fLow, metricParams.fUpper, metricParams.f0, funct, norm=I7, vary_fmax=vary_fmax, vary_density=vary_density)
    metricParams.moments = moments
def _cleanup(self) -> None:
    """Cleanup unused transports."""
    # Cancel any previously scheduled cleanup pass before running this one.
    if self._cleanup_handle:
        self._cleanup_handle.cancel()
    now = self._loop.time()
    timeout = self._keepalive_timeout
    if self._conns:
        connections = {}
        # Connections last used before this deadline have outlived the
        # keep-alive window and get closed.
        deadline = now - timeout
        for key, conns in self._conns.items():
            alive = []
            for proto, use_time in conns:
                if proto.is_connected():
                    if use_time - deadline < 0:
                        # Expired: close it. SSL transports are kept around
                        # in a separate list (unless disabled) so their
                        # shutdown can be tracked.
                        transport = proto.transport
                        proto.close()
                        if (key.is_ssl and not self._cleanup_closed_disabled):
                            self._cleanup_closed_transports.append(transport)
                    else:
                        alive.append((proto, use_time))
            # Drop keys whose connection list became empty.
            if alive:
                connections[key] = alive
        self._conns = connections
    if self._conns:
        # Live connections remain: reschedule ourselves via a weak handle
        # so the connector itself can still be garbage collected.
        self._cleanup_handle = helpers.weakref_handle(self, '_cleanup', timeout, self._loop)
def __processUsers(self):
    """Process users of the queue.

    Blocks until there is work (or shutdown is signalled), then drains the
    queue, adding each user under the queue lock.
    """
    import time
    # Wait until there is something to process or we are told to stop.
    # Sleep briefly instead of busy-waiting: the original `pass` loop
    # spun a core at 100% CPU while idle.
    while self.__usersToProccess.empty() and not self.__end:
        time.sleep(0.01)
    while not self.__end or not self.__usersToProccess.empty():
        self.__lockGetUser.acquire()
        try:
            new_user = self.__usersToProccess.get(False)
        except Empty:
            # Queue drained concurrently; nothing left to do.
            self.__lockGetUser.release()
            return
        else:
            self.__lockGetUser.release()
            self.__addUser(new_user)
            self.__logger.info("__processUsers:" + str(self.__usersToProccess.qsize()) + " users to process")
def global_injector_decorator(inject_globals):
    '''Decorator used by the LazyLoader to inject globals into a function at
    execute time.

    globals
        Dictionary with global variables to inject
    '''
    def inner_decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Temporarily swap the injected names into func's globals for
            # the duration of this call only.
            with salt.utils.context.func_globals_inject(func, **inject_globals):
                return func(*args, **kwargs)
        return wrapper
    return inner_decorator
def from_params(cls, params: Params, instances: Iterable['adi.Instance'] = None):  # type: ignore
    """There are two possible ways to build a vocabulary; from a
    collection of instances, using :func:`Vocabulary.from_instances`, or
    from a pre-saved vocabulary, using :func:`Vocabulary.from_files`.
    You can also extend pre-saved vocabulary with collection of instances
    using this method. This method wraps these options, allowing their
    specification from a ``Params`` object, generated from a JSON
    configuration file.

    Parameters
    ----------
    params : Params, required.
    instances : Iterable['adi.Instance'], optional
        If ``params`` doesn't contain a ``directory_path`` key,
        the ``Vocabulary`` can be built directly from a collection of
        instances (i.e. a dataset). If ``extend`` key is set False,
        dataset instances will be ignored and final vocabulary will be
        one loaded from ``directory_path``. If ``extend`` key is set True,
        dataset instances will be used to extend the vocabulary loaded
        from ``directory_path`` and that will be final vocabulary used.

    Returns
    -------
    A ``Vocabulary``.
    """
    # pylint: disable=arguments-differ
    # Vocabulary is ``Registrable`` so that you can configure a custom subclass,
    # but (unlike most of our registrables) almost everyone will want to use the
    # base implementation. So instead of having an abstract ``VocabularyBase`` or
    # such, we just add the logic for instantiating a registered subclass here,
    # so that most users can continue doing what they were doing.
    vocab_type = params.pop("type", None)
    if vocab_type is not None:
        # Delegate entirely to the registered subclass's from_params.
        return cls.by_name(vocab_type).from_params(params=params, instances=instances)
    extend = params.pop("extend", False)
    vocabulary_directory = params.pop("directory_path", None)
    # Validate the combination of directory / instances / extend up front.
    if not vocabulary_directory and not instances:
        raise ConfigurationError("You must provide either a Params object containing a " "vocab_directory key or a Dataset to build a vocabulary from.")
    if extend and not instances:
        raise ConfigurationError("'extend' is true but there are not instances passed to extend.")
    if extend and not vocabulary_directory:
        raise ConfigurationError("'extend' is true but there is not 'directory_path' to extend from.")
    if vocabulary_directory and instances:
        if extend:
            logger.info("Loading Vocab from files and extending it with dataset.")
        else:
            logger.info("Loading Vocab from files instead of dataset.")
    if vocabulary_directory:
        # Load the pre-saved vocabulary; if not extending, this is final.
        vocab = cls.from_files(vocabulary_directory)
        if not extend:
            params.assert_empty("Vocabulary - from files")
            return vocab
    if extend:
        # Validation above guarantees vocab was loaded when extend is True.
        vocab.extend_from_instances(params, instances=instances)
        return vocab
    # Neither a directory nor extend: build from scratch from the instances.
    min_count = params.pop("min_count", None)
    max_vocab_size = pop_max_vocab_size(params)
    non_padded_namespaces = params.pop("non_padded_namespaces", DEFAULT_NON_PADDED_NAMESPACES)
    pretrained_files = params.pop("pretrained_files", {})
    min_pretrained_embeddings = params.pop("min_pretrained_embeddings", None)
    only_include_pretrained_words = params.pop_bool("only_include_pretrained_words", False)
    tokens_to_add = params.pop("tokens_to_add", None)
    params.assert_empty("Vocabulary - from dataset")
    return cls.from_instances(instances=instances, min_count=min_count, max_vocab_size=max_vocab_size, non_padded_namespaces=non_padded_namespaces, pretrained_files=pretrained_files, only_include_pretrained_words=only_include_pretrained_words, tokens_to_add=tokens_to_add, min_pretrained_embeddings=min_pretrained_embeddings)
def avoid_parallel_execution(func):
    """A decorator to avoid the parallel execution of a function.

    If the function is currently called, the second call is just skipped.

    :param func: The function to decorate
    :return: the wrapped function (skipped calls return None)
    """
    # functools.wraps preserves func's name/docstring; the original wrapper
    # silently replaced them with the wrapper's own metadata.
    @functools.wraps(func)
    def func_wrapper(*args, **kwargs):
        # NOTE(review): this check-then-set is not atomic, so truly
        # concurrent calls may still race past it — this matches the
        # original best-effort behavior.
        if not getattr(func, "currently_executing", False):
            func.currently_executing = True
            try:
                return func(*args, **kwargs)
            finally:
                # Always clear the flag, even if func raises.
                func.currently_executing = False
        else:
            logger.verbose("Avoid parallel execution of function {}".format(func))
    return func_wrapper
def get_async(cls, blob_key, **ctx_options):
    """Async version of get()."""
    # Accept only blob keys or (byte/unicode) strings.
    valid_key = isinstance(blob_key, (BlobKey, basestring))
    if not valid_key:
        raise TypeError('Expected blob key, got %r' % (blob_key,))
    # Blob infos have no ancestry, so a parent option is meaningless here.
    if 'parent' in ctx_options:
        raise TypeError('Parent is not supported')
    return cls.get_by_id_async(str(blob_key), **ctx_options)
def html2man(data, formatter):
    """Convert HTML text from cplusplus.com to man pages."""
    # Render HTML into groff markup first, then groff into man-page text.
    return groff2man(formatter(data))
def from_protobuf(cls, proto: SaveStateProto) -> SaveState:
    """Constructor from protobuf. Can raise ValueErrors from called from_protobuf() parsers.

    :param proto: protobuf structure
    :type proto: ~unidown.plugin.protobuf.save_state_pb2.SaveStateProto
    :return: the SaveState
    :rtype: ~unidown.plugin.save_state.SaveState
    :raises ValueError: version of SaveState does not exist or is empty inside the protobuf
    :raises ~packaging.version.InvalidVersion: version is not PEP440 conform
    """
    # Parse the link items first (matching the original evaluation order,
    # so their ValueErrors surface before the version checks).
    data_dict = {key: LinkItem.from_protobuf(item) for key, item in proto.data.items()}
    if proto.version == "":
        raise ValueError("version of SaveState does not exist or is empty inside the protobuf.")
    try:
        version = Version(proto.version)
    except InvalidVersion:
        raise InvalidVersion(f"Plugin version is not PEP440 conform: {proto.version}")
    return cls(version, PluginInfo.from_protobuf(proto.plugin_info), Timestamp.ToDatetime(proto.last_update), data_dict)
def get_country_info_from_m49(cls, m49, use_live=True, exception=None):
    # type: (int, bool, Optional[ExceptionUpperBound]) -> Optional[Dict[str]]
    """Get country name from M49 code

    Args:
        m49 (int): M49 numeric code for which to get country information
        use_live (bool): Try to get use latest data from web rather than file in package. Defaults to True.
        exception (Optional[ExceptionUpperBound]): An exception to raise if country not found. Defaults to None.

    Returns:
        Optional[Dict[str]]: Country information
    """
    # Resolve M49 -> ISO3, then reuse the ISO3-based lookup.
    iso3 = cls.get_iso3_from_m49(m49, use_live=use_live, exception=exception)
    if iso3 is None:
        return None
    return cls.get_country_info_from_iso3(iso3, exception=exception)
def _ErrorOfDifferences ( self , cov , warning_cutoff = 1.0e-10 ) :
"""inputs :
cov is the covariance matrix of A
returns the statistical error matrix of A _ i - A _ j""" | diag = np . matrix ( cov . diagonal ( ) )
d2 = diag + diag . transpose ( ) - 2 * cov
# Cast warning _ cutoff to compare a negative number
cutoff = - abs ( warning_cutoff )
# check for any numbers below zero .
if np . any ( d2 < 0.0 ) :
if np . any ( d2 < cutoff ) :
print ( "A squared uncertainty is negative. Largest Magnitude = {0:f}" . format ( abs ( np . min ( d2 [ d2 < cutoff ] ) ) ) )
else :
d2 [ np . logical_and ( 0 > d2 , d2 > cutoff ) ] = 0.0
return np . sqrt ( np . array ( d2 ) ) |
def list_(saltenv='base', test=None):
    '''List currently configured reactors

    CLI Example:

    .. code-block:: bash

        salt-run reactor.list
    '''
    # Set up the event listener before firing the request so the reply
    # cannot be missed.
    sevent = salt.utils.event.get_event(
        'master',
        __opts__['sock_dir'],
        __opts__['transport'],
        opts=__opts__,
        listen=True)
    master_key = salt.utils.master.get_master_key('root', __opts__)
    __jid_event__.fire_event({'key': master_key}, 'salt/reactors/manage/list')
    # Wait up to 30 seconds for the reactor list to come back.
    results = sevent.get_event(wait=30, tag='salt/reactors/manage/list-results')
    return results['reactors']
def select(self, fields, **exprs):
    """Create a new table containing a subset of attributes, with optionally
    newly-added fields computed from each rec in the original table.

    @param fields: list of strings, or single space-delimited string, listing attribute name to be included in the
        output
    @type fields: list, or space-delimited string
    @param exprs: one or more named callable arguments, to compute additional fields using the given function
    @type exprs: C{name=callable}, callable takes the record as an argument, and returns the new attribute value
        If a string is passed as a callable, this string will be used using string formatting, given the record
        as a source of interpolation values. For instance, C{fullName='%(lastName)s, %(firstName)s'}
    """
    fields = self._parse_fields_string(fields)

    def _make_string_callable(expr):
        # String expressions become %-interpolation templates applied to
        # each record; callables are used as-is.
        if isinstance(expr, basestring):
            return lambda r: expr % r
        else:
            return expr

    exprs = dict((k, _make_string_callable(v)) for k, v in exprs.items())
    raw_tuples = []
    for ob in self.obs:
        # Missing attributes are filled with None rather than raising.
        attrvalues = tuple(getattr(ob, fieldname, None) for fieldname in fields)
        if exprs:
            # Computed fields are appended after the selected fields.
            attrvalues += tuple(expr(ob) for expr in exprs.values())
        raw_tuples.append(attrvalues)
    all_names = tuple(fields) + tuple(exprs.keys())
    ret = Table()
    # Carry over (empty copies of) the indexes that survive the selection.
    ret._indexes.update(dict((k, v.copy_template()) for k, v in self._indexes.items() if k in all_names))
    return ret().insert_many(DataObject(**dict(zip(all_names, outtuple))) for outtuple in raw_tuples)
def tuple_replace(tup, *pairs):
    """Return a copy of a tuple with some elements replaced.

    :param tup: The tuple to be copied.
    :param pairs: Any number of (index, value) tuples where index is the index
        of the item to replace and value is the new value of the item.
    """
    # Work on a mutable copy, then freeze it again. Negative indices are
    # supported, just like normal sequence assignment.
    items = list(tup)
    for idx, new_value in pairs:
        items[idx] = new_value
    return tuple(items)
def score_hist(df, columns=None, groupby=None, threshold=0.7, stacked=True, bins=20, percent=True, alpha=0.33, show=True, block=False, save=False):
    """Plot multiple histograms on one plot, typically of "score" values between 0 and 1.

    Typically the groupby or columns of the dataframe are the classification
    categories (0, .5, 1) and the values are scores between 0 and 1.

    NOTE(review): df may be a DataFrame or DataFrameGroupBy; columns/groupby
    select the score columns plotted — confirm against callers.
    """
    # Restrict to the requested columns (keeping the groupby column, if any).
    df = df if columns is None else df[([] if groupby is None else [groupby]) + list(columns)].copy()
    if groupby is not None or threshold is not None:
        df = groups_from_scores(df, groupby=groupby, threshold=threshold)
    # Scale factor: plot percentages (0-100) or raw fractions (0-1).
    percent = 100. if percent else 1.
    if isinstance(df, pd.core.groupby.DataFrameGroupBy):
        df = df_from_groups(df, columns=columns) * percent
    columns = df.columns if columns is None else columns
    if bins is None:
        bins = 20
    if isinstance(bins, int):
        # Spread the requested number of bins evenly over the data range.
        bins = np.linspace(np.min(df.min()), np.max(df.max()), bins)
    log.debug('bins: {}'.format(bins))
    figs = []
    df.plot(kind='hist', alpha=alpha, stacked=stacked, bins=bins)
    # for col in df.columns:
    #     series = df[col] * percent
    #     log.debug('{}'.format(series))
    #     figs.append(plt.hist(series, bins=bins, alpha=alpha,
    #                          weights=percent * np.ones_like(series) / len(series.dropna()),
    #                          label=stringify(col)))
    plt.legend()
    plt.xlabel('Score (%)')
    plt.ylabel('Percent')
    plt.title('{} Scores for {}'.format(np.sum(df.count()), columns))
    plt.draw()
    if save or not show:
        fig = plt.gcf()
        today = datetime.datetime.today()
        # timetuple() yields 9 fields; str.format ignores the unused ones.
        fig.savefig(os.path.join(IMAGES_PATH, 'score_hist_{:04d}-{:02d}-{:02d}_{:02d}{:02d}.jpg'.format(*today.timetuple())))
    if show:
        plt.show(block=block)
    # NOTE(review): figs is never populated since the per-column loop above
    # is commented out, so this always returns an empty list.
    return figs
def process_once(self, timeout=0.01):
    """Handles an event and calls it's handler

    Optional arguments:
    * timeout=0.01 - Wait for an event until the timeout is reached.
    """
    try:
        event = self.recv(timeout)
        if event:
            # Each event is a (type, content) pair; dispatch on the type.
            event_t = event[0]
            event_c = event[1]
            if event_t == 'JOIN':
                self.on_join(event_c[0], event_c[1])
            elif event_t == 'PART':
                self.on_part(event_c[0], event_c[1], event_c[2])
            elif event_t == 'PRIVMSG':
                # Messages addressed to a joined channel go to the channel
                # handler; everything else is a private message.
                if event_c[1] in self.channels.keys():
                    self.on_chanmsg(event_c[0], event_c[1], event_c[2])
                else:
                    self.on_privmsg(event_c[0], event_c[2])
            elif event_t == 'NOTICE':
                if event_c[1] in self.channels.keys():
                    self.on_channotice(event_c[0], event_c[1], event_c[2])
                else:
                    self.on_privnotice(event_c[0], event_c[2])
            elif event_t == 'CTCP':
                if event_c[1] in self.channels.keys():
                    self.on_chanctcp(event_c[0], event_c[1], event_c[2])
                else:
                    self.on_privctcp(event_c[0], event_c[2])
            elif event_t == 'CTCP_REPLY':
                self.on_ctcp_reply(event_c[0], event_c[2])
            elif event_t == 'MODE':
                # A mode change on ourselves is a user mode; otherwise it
                # is a channel mode change.
                if event_c[0][0] == self.current_nick:
                    self.on_umode(event_c[1])
                else:
                    self.on_cmode(event_c[0], event_c[1], event_c[2])
            elif event_t == 'KICK':
                self.on_kick(event_c[0], event_c[1], event_c[2], event_c[3])
            elif event_t == 'INVITE':
                self.on_invite(event_c[0], event_c[2])
            elif event_t == 'NICK':
                self.on_nick(event_c[0], event_c[1])
            elif event_t == 'TOPIC':
                self.on_topic(event_c[0], event_c[1], event_c[2])
            elif event_t == 'QUIT':
                self.on_quit(event_c[0], event_c[1])
            elif event_t == 'LUSERS':
                self.on_lusers(event_c)
            elif event_t == 'ERROR':
                self.on_error(event_c[0])
            elif event_t == 'UNKNOWN':
                self.on_unknown(event_c[0])
    except self.LurklibError as exception:
        # Route library errors to the user-overridable exception handler.
        self.on_exception(exception)
def _exec(cmd):
    """Execute command using subprocess.Popen

    :param cmd: argument list for the process
    :return: (code, stdout, stderr)
    """
    proc = subprocess.Popen(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
    # pylint: disable=unexpected-keyword-arg
    stdout, stderr = proc.communicate(timeout=defaults.DEFAULT_VCS_TIMEOUT)
    # Decode the captured byte streams to text before returning.
    return proc.returncode, stdout.decode(), stderr.decode()
def alreadyHasEntry(oldClassString, og):
    """Return true if there is already an owl:Class with the old id"""
    namespace = oldClassString.split(':')[0]
    if namespace == 'http':
        # Full URLs are used verbatim as the lookup target.
        target = rdflib.URIRef(oldClassString)
        print('OLD CLASS ID IS A URL', oldClassString)
    else:
        try:
            og.add_known_namespaces(namespace)
            target = og.expand(oldClassString)
        except KeyError:
            # we only want known namespaces
            print('MISSING NAMESPACE', namespace, oldClassString)
            return True
    return (target, rdf.type, owl.Class) in og.g
def _set_verbose ( self , verbose ) :
"""Check and set our : data : ` verbose ` attribute .
The debug - level must be a string or an integer . If it is one of
the allowed strings , GnuPG will translate it internally to it ' s
corresponding integer level :
basic = 1-2
advanced = 3-5
expert = 6-8
guru = 9 +
If it ' s not one of the recognised string levels , then then
entire argument is ignored by GnuPG . : (
To fix that stupid behaviour , if they wanted debugging but typo ' d
the string level ( or specified ` ` verbose = True ` ` ) , we ' ll default to
' basic ' logging .""" | string_levels = ( 'basic' , 'advanced' , 'expert' , 'guru' )
if verbose is True : # The caller wants logging , but we need a valid - - debug - level
# for gpg . Default to " basic " , and warn about the ambiguity .
verbose = 'basic'
if ( isinstance ( verbose , str ) and not ( verbose in string_levels ) ) :
verbose = 'basic'
self . verbose = verbose |
def cmd_init(self, *args):
    '''Create a initial buildozer.spec in the current directory'''
    spec_name = 'buildozer.spec'
    # Refuse to clobber an existing spec file.
    if exists(spec_name):
        print('ERROR: You already have a buildozer.spec file.')
        exit(1)
    # Seed the spec from the default template shipped with the package.
    template = join(dirname(__file__), 'default.spec')
    copyfile(template, spec_name)
    print('File buildozer.spec created, ready to customize!')
def read_header(filename):
    '''returns a dictionary of values in the header of the given file

    Lines of the form "key: value" between the "*** Header Start ***" and
    "*** Header End ***" markers are collected into the returned dict.
    '''
    header = {}
    data = nl.universal_read(filename)
    lines = [x.strip() for x in data.split('\n')]
    for line in lines:
        if line == "*** Header Start ***":
            # NOTE(review): like the original, lines BEFORE this marker are
            # still collected below — confirm whether gating is intended.
            continue
        if line == "*** Header End ***":
            return header
        fields = line.split(": ")
        if len(fields) == 2:
            header[fields[0]] = fields[1]
    # End marker never seen: return what was collected instead of
    # implicitly returning None (callers expect a dict).
    return header
def import_model(self, name, path="floyd.db.models"):
    """imports a model of name from path, returning from local model
    cache if it has been previously loaded otherwise importing

    :param name: attribute name of the model inside the module at ``path``
    :param path: dotted module path to import from
    :return: the model object, or False when it cannot be imported
    """
    if name in self._model_cache:
        return self._model_cache[name]
    try:
        module = __import__(path, None, None, [name])
        model = getattr(module, name)
    except (ImportError, AttributeError):
        # AttributeError covers a module that imports fine but lacks the
        # requested name; the original let that propagate instead of
        # returning False like the ImportError path.
        return False
    self._model_cache[name] = model
    return model
def init_live_reload(run):
    """Start the live reload task

    :param run: run the task inside of this function or just create it
    """
    from asyncio import get_event_loop
    from ._live_reload import start_child
    loop = get_event_loop()
    if run:
        # Block here until the child task completes.
        loop.run_until_complete(start_child())
    else:
        # Just schedule the task on the event loop.
        loop.create_task(start_child())
def label_geometry_measures(label_image, intensity_image=None):
    """Wrapper for the ANTs function labelGeometryMeasures.

    ANTsR function: `labelGeometryMeasures`

    Arguments
    ---------
    label_image : ANTsImage
        image on which to compute geometry
    intensity_image : ANTsImage (optional)
        image with intensity values

    Returns
    -------
    pandas.DataFrame
        One row per nonzero label, with a `VolumeInMillimeters` column
        scaled by the voxel spacing.

    Example
    -------
    >>> import ants
    >>> fi = ants.image_read(ants.get_ants_data('r16'))
    >>> seg = ants.kmeans_segmentation(fi, 3)['segmentation']
    >>> geom = ants.label_geometry_measures(seg, fi)
    """
    if intensity_image is None:
        # No intensity image supplied: use the label image itself.
        intensity_image = label_image.clone()
    # The underlying native routine writes its result to a CSV file.
    # NOTE(review): the temporary CSV is never deleted — potential
    # temp-file accumulation; confirm whether cleanup is needed.
    outcsv = mktemp(suffix='.csv')
    veccer = [label_image.dimension, label_image, intensity_image, outcsv]
    veccer_processed = utils._int_antsProcessArguments(veccer)
    libfn = utils.get_lib_fn('LabelGeometryMeasures')
    pp = libfn(veccer_processed)
    # Read the CSV the native call produced (its return value is discarded).
    pp = pd.read_csv(outcsv)
    # Replace the label column with the sorted unique nonzero labels.
    pp['Label'] = np.sort(np.unique(label_image[label_image > 0])).astype('int')
    pp_cols = pp.columns.values
    pp_cols[1] = 'VolumeInMillimeters'
    pp.columns = pp_cols
    # Convert the raw (voxel-count-based) volume to physical units
    # using the per-voxel volume.
    spc = np.prod(label_image.spacing)
    pp['VolumeInMillimeters'] = pp['VolumeInMillimeters'] * spc
    return pp
def is_port_default(self):
    '''Return whether the URL is using the default port.

    Returns None when the scheme has no known default port.
    '''
    if self.scheme not in RELATIVE_SCHEME_DEFAULT_PORTS:
        return None
    return RELATIVE_SCHEME_DEFAULT_PORTS[self.scheme] == self.port
def sync(self, vault_client):
    """Synchronize the local and remote Vault resources. Has the net
    effect of adding the backend if needed."""
    if self.present and not self.existing:
        LOG.info("Mounting %s backend on %s", self.backend, self.path)
        self.actually_mount(vault_client)
    elif self.present:
        LOG.info("%s backend already mounted on %s", self.backend, self.path)
    elif self.existing:
        LOG.info("Unmounting %s backend on %s", self.backend, self.path)
        self.unmount(vault_client)
    else:
        LOG.info("%s backend already unmounted on %s", self.backend, self.path)
    # Tunables only apply when the backend should be mounted and the
    # client reports a version.
    if self.present and vault_client.version:
        self.sync_tunables(vault_client)
def var_fmpt(P):
    """Variances of first mean passage times for an ergodic transition
    probability matrix.

    Parameters
    ----------
    P : array
        (k, k), an ergodic Markov transition probability matrix.

    Returns
    -------
    : array
        (k, k), elements are the variances for the number of intervals
        required for a chain starting in state i to first enter state j.

    Examples
    --------
    >>> import numpy as np
    >>> p = np.array([[.5, .25, .25], [.5, 0, .5], [.25, .25, .5]])
    >>> vfm = var_fmpt(p)

    Notes
    -----
    Uses formulation (and examples on p. 83) in :cite:`Kemeny1967`.
    Rewritten with plain ndarrays and the ``@`` operator because
    ``np.matrix`` is deprecated.
    """
    P = np.asarray(P, dtype=float)
    # Approximate the limiting (stationary) matrix by a high matrix power.
    A = np.linalg.matrix_power(P, 1000)
    n, k = A.shape
    I = np.identity(k)
    # Fundamental matrix of the chain.
    Z = la.inv(I - P + A)
    E = np.ones_like(Z)
    D = np.diag(1. / np.diag(A))
    Zdg = np.diag(np.diag(Z))
    M = (I - Z + E @ Zdg) @ D
    ZM = Z @ M
    ZMdg = np.diag(np.diag(ZM))
    W = M @ (2 * Zdg @ D - I) + 2 * (ZM - E @ ZMdg)
    # Variance = second-moment matrix W minus the elementwise square of M.
    return W - M * M
def get_json_data(latitude=52.091579, longitude=5.119734):
    """Get buienradar json data and return results.

    Fetches both the general weather data and the forecasted
    precipitation for the given coordinates; any failure message is
    recorded under MESSAGE in the returned dict.
    """
    def _failure_message(result):
        # Build a human-readable message from a failed web-service result.
        if STATUS_CODE in result and MESSAGE in result:
            return "Status: %d, Msg: %s" % (result[STATUS_CODE], result[MESSAGE])
        if MESSAGE in result:
            return "Msg: %s" % (result[MESSAGE])
        return "Something went wrong (reason unknown)."

    final_result = {SUCCESS: False, MESSAGE: None,
                    CONTENT: None, RAINCONTENT: None}
    log.info("Getting buienradar json data for latitude=%s, longitude=%s",
             latitude, longitude)
    result = __get_ws_data()
    if result[SUCCESS]:
        # store json data:
        final_result[CONTENT] = result[CONTENT]
        final_result[SUCCESS] = True
    else:
        # The original duplicated this message-building logic verbatim
        # for both requests; it is now shared via _failure_message.
        msg = _failure_message(result)
        log.warning(msg)
        final_result[MESSAGE] = msg
    # load forecasted precipitation:
    result = __get_precipfc_data(latitude, longitude)
    if result[SUCCESS]:
        final_result[RAINCONTENT] = result[CONTENT]
    else:
        msg = _failure_message(result)
        log.warning(msg)
        final_result[MESSAGE] = msg
    return final_result
def _iter_branch(self, node):
    """Yield (key, value) pairs stored in this node and its descendants.

    :param node: node in form of list, or BLANK_NODE

    .. note::
        Here key is in full form, rather than the key of the
        individual node.
    """
    if node == BLANK_NODE:
        # PEP 479: raising StopIteration inside a generator becomes a
        # RuntimeError on Python 3.7+; `return` is the correct way to
        # end the iteration.
        return
    node_type = self._get_node_type(node)
    if is_key_value_type(node_type):
        nibbles = key_nibbles_from_key_value_node(node)
        key = b'+'.join([to_string(x) for x in nibbles])
        if node_type == NODE_TYPE_EXTENSION:
            # Extensions delegate to the inner node they point at.
            sub_tree = self._iter_branch(self._get_inner_node_from_extension(node))
        else:
            # Leaf: the value lives directly in this node.
            sub_tree = [(to_string(NIBBLE_TERMINATOR), node[1])]
        # Prepend the key of this node to the keys of children.
        for sub_key, sub_value in sub_tree:
            full_key = (key + b'+' + sub_key).strip(b'+')
            yield (full_key, sub_value)
    elif node_type == NODE_TYPE_BRANCH:
        for i in range(16):
            sub_tree = self._iter_branch(self._decode_to_node(node[i]))
            for sub_key, sub_value in sub_tree:
                full_key = (str_to_bytes(str(i)) + b'+' + sub_key).strip(b'+')
                yield (full_key, sub_value)
        if node[16]:
            # A branch node may also carry a value of its own.
            yield (to_string(NIBBLE_TERMINATOR), node[-1])
def has_permission(cls, user):
    """Override of the default permission check: permissions are
    validated against our role system."""
    # No login needed, so access is always fine.
    if not cls.requires_login:
        return True
    # If the user is somehow not logged in.
    if not user.is_authenticated:
        return False
    # The permission_required attribute is mandatory; returns a tuple.
    perms = cls.get_permission_required()
    # When perms are defined but empty, checking is skipped.
    if not perms:
        return True
    # Admins bypass per-permission checks.
    role = user.urole.role.name
    if role == cls.ADMIN:
        return True
    # Grant access if at least one permission is valid for this role.
    return any(cls.check_permission(role, perm) for perm in perms)
def max_normal_germline_depth(in_file, params, somatic_info):
    """Calculate threshold for excluding potential heterozygotes based on normal depth."""
    variants = pysam.VariantFile(in_file)
    depths = []
    for rec in variants:
        stats = _is_possible_loh(rec, variants, params, somatic_info)
        depth = tz.get_in(["normal", "depth"], stats)
        if depth:
            depths.append(depth)
    # Returns None when no usable depths were found.
    if depths:
        return np.median(depths) * NORMAL_FILTER_PARAMS["max_depth_percent"]
def solar_azimuth(self, dateandtime=None):
    """Calculate the solar azimuth angle for a specific date/time.

    :param dateandtime: The date and time for which to calculate the angle.
    :type dateandtime: :class:`~datetime.datetime`
    :returns: The azimuth angle in degrees clockwise from North.
    :rtype: float
    """
    if self.astral is None:
        self.astral = Astral()
    if dateandtime is None:
        dateandtime = datetime.datetime.now(self.tz)
    elif not dateandtime.tzinfo:
        # Naive datetimes are assumed to be in this location's timezone.
        dateandtime = self.tz.localize(dateandtime)
    # The underlying calculation expects UTC.
    utc_dt = dateandtime.astimezone(pytz.UTC)
    return self.astral.solar_azimuth(utc_dt, self.latitude, self.longitude)
def open(self):
    """Open an existing database.

    Returns self so calls can be chained.

    Raises:
        IOError: if the underlying table does not exist.
    """
    if not self._table_exists():
        # Call syntax (not the Python-2-only ``raise IOError, msg`` form)
        # so this parses on Python 3 as well.
        raise IOError("Table %s doesn't exist" % self.name)
    self.mode = "open"
    # get table info
    self._get_table_info()
    # Map field name -> converter for every field whose declared type
    # has a registered conversion function.
    self.types = dict([(f[0], self.conv_func[f[1].upper()])
                       for f in self.fields if f[1].upper() in self.conv_func])
    return self
def get_task(self, patient_id, task_id):
    """Invoke the TouchWorksMagicConstants.ACTION_GET_TASK action for the
    given patient and task.

    (The previous docstring incorrectly named
    ACTION_GET_ENCOUNTER_LIST_FOR_PATIENT — a copy-paste error.)

    :param patient_id: identifier of the patient owning the task
    :param task_id: identifier of the task to fetch
    :return: JSON response
    """
    magic = self._magic_json(action=TouchWorksMagicConstants.ACTION_GET_TASK,
                             patient_id=patient_id,
                             parameter1=task_id)
    response = self._http_request(TouchWorksEndPoints.MAGIC_JSON, data=magic)
    result = self._get_results_or_raise_if_magic_invalid(
        magic, response, TouchWorksMagicConstants.RESULT_GET_TASK)
    return result
def span(self):
    """Return a contiguous range that is a superset of this range.

    Returns:
        A VersionRange object representing the span of this range. For
        example, the span of "2+<4|6+<8" would be "2+<8".
    """
    spanned = VersionRange(None)
    # One bound stretching from the lowest lower bound to the highest
    # upper bound (self.bounds is kept ordered).
    spanned.bounds = [_Bound(self.bounds[0].lower, self.bounds[-1].upper)]
    return spanned
def parse_connection_setup(self):
    """Internal function used to parse the connection setup response.

    Returns 1 once the full setup reply has been parsed (and consumed
    from ``self.data_recv``), or 0 if more data must be received first.
    """
    # Only the ConnectionSetupRequest has been sent so far.
    r = self.sent_requests[0]
    while 1:
        # print 'data_send:', repr(self.data_send)
        # print 'data_recv:', repr(self.data_recv)
        if r._data:
            # Base header already parsed; wait for the advertised
            # additional data (length is in 4-byte units).
            alen = r._data['additional_length'] * 4
            # The full response hasn't arrived yet.
            if len(self.data_recv) < alen:
                return 0
            # Connection failed or further authentication is needed.
            # Set reason to the reason string.
            if r._data['status'] != 1:
                r._data['reason'] = self.data_recv[:r._data['reason_length']]
            # Else connection succeeded, parse the reply.
            else:
                x, d = r._success_reply.parse_binary(self.data_recv[:alen], self, rawdict=1)
                r._data.update(x)
            # Setup exchange complete: drop the request and the bytes
            # it consumed.
            del self.sent_requests[0]
            self.data_recv = self.data_recv[alen:]
            return 1
        else:
            # The base reply is 8 bytes long.
            if len(self.data_recv) < 8:
                return 0
            # Parse the fixed-size header; the loop then continues to
            # handle the additional data it announces.
            r._data, d = r._reply.parse_binary(self.data_recv[:8], self, rawdict=1)
            self.data_recv = self.data_recv[8:]
def ChromeContext(*args, **kwargs):
    '''Context manager for conveniently handling the lifetime of the underlying chromium instance.

    In general, this should be the preferred way to use an instance of
    `ChromeRemoteDebugInterface`. All parameters are forwarded through
    to the underlying ChromeRemoteDebugInterface() constructor.
    '''
    log = logging.getLogger("Main.ChromeController.ChromeContext")
    chrome_created = False
    try:
        chrome_instance = ChromeRemoteDebugInterface(*args, **kwargs)
        chrome_created = True
        log.info("Entering chrome context")
        yield chrome_instance
    except Exception:
        log.error("Exception in chrome context!")
        for line in traceback.format_exc().split("\n"):
            log.error(line)
        # Bare raise preserves the original traceback; ``raise e``
        # would re-raise from this frame instead.
        raise
    finally:
        log.info("Exiting chrome context")
        # Only close if construction succeeded.
        if chrome_created:
            chrome_instance.close()
def _plane2col(plane):
    '''Take a string like 'xy' and return the matching indices from COLS.*'''
    planes = ('xy', 'yx', 'xz', 'zx', 'yz', 'zy')
    assert plane in planes, 'No such plane found! Please select one of: ' + str(planes)
    first, second = plane
    # Axis letters map to COLS.X / COLS.Y / COLS.Z attributes.
    return (getattr(COLS, first.capitalize()),
            getattr(COLS, second.capitalize()),)
def get_related_models(cls, model):
    """Get a dictionary with related structure models for a given class
    or model:

    >> SupportedServices.get_related_models(gitlab_models.Project)
    'service': nodeconductor_gitlab.models.GitLabService,
    'service_project_link': nodeconductor_gitlab.models.GitLabServiceProjectLink,
    'resources': [
        nodeconductor_gitlab.models.Group,
        nodeconductor_gitlab.models.Project,
    """
    from waldur_core.structure.models import ServiceSettings
    if isinstance(model, ServiceSettings):
        model_str = cls._registry.get(model.type, {}).get('model_name', '')
    else:
        model_str = cls._get_model_str(model)
    for models in cls.get_service_models().values():
        # The entry matches when the model is the service itself, the
        # service-project link, or any of the registered resources.
        candidates = [models['service'], models['service_project_link']]
        candidates.extend(models['resources'])
        if any(model_str == cls._get_model_str(candidate) for candidate in candidates):
            return models
def verify(self, subject, signature=None):
    """Verify a subject with a signature using this key.

    :param subject: The subject to verify
    :type subject: ``str``, ``unicode``, ``None``, :py:obj:`PGPMessage`,
        :py:obj:`PGPKey`, :py:obj:`PGPUID`
    :param signature: If the signature is detached, it should be
        specified here.
    :type signature: :py:obj:`PGPSignature`
    :returns: :py:obj:`~pgpy.types.SignatureVerification`
    :raises TypeError: if subject or signature has an unexpected type
    :raises PGPError: if no applicable signatures are found
    """
    # (signature, signed-thing) pairs to verify.
    sspairs = []
    # some type checking
    if not isinstance(subject, (type(None), PGPMessage, PGPKey, PGPUID, PGPSignature, six.string_types, bytes, bytearray)):
        raise TypeError("Unexpected subject value: {:s}".format(str(type(subject))))
    if not isinstance(signature, (type(None), PGPSignature)):
        raise TypeError("Unexpected signature value: {:s}".format(str(type(signature))))

    def _filter_sigs(sigs):
        # Keep only signatures made by this key or one of its subkeys.
        _ids = {self.fingerprint.keyid} | set(self.subkeys)
        return [sig for sig in sigs if sig.signer in _ids]

    # collect signature(s)
    if signature is None:
        # No detached signature: harvest signatures embedded in the subject.
        if isinstance(subject, PGPMessage):
            sspairs += [(sig, subject.message) for sig in _filter_sigs(subject.signatures)]
        if isinstance(subject, (PGPUID, PGPKey)):
            sspairs += [(sig, subject) for sig in _filter_sigs(subject.__sig__)]
        if isinstance(subject, PGPKey):
            # user ids
            sspairs += [(sig, uid) for uid in subject.userids for sig in _filter_sigs(uid.__sig__)]
            # user attributes
            sspairs += [(sig, ua) for ua in subject.userattributes for sig in _filter_sigs(ua.__sig__)]
            # subkey binding signatures
            sspairs += [(sig, subkey) for subkey in subject.subkeys.values() for sig in _filter_sigs(subkey.__sig__)]
    elif signature.signer in {self.fingerprint.keyid} | set(self.subkeys):
        # Detached signature made by this key (or a subkey).
        sspairs += [(signature, subject)]

    if len(sspairs) == 0:
        raise PGPError("No signatures to verify")

    # finally, start verifying signatures
    sigv = SignatureVerification()
    for sig, subj in sspairs:
        if self.fingerprint.keyid != sig.signer and sig.signer in self.subkeys:
            # Delegate to the subkey that actually made the signature.
            warnings.warn("Signature was signed with this key's subkey: {:s}. "
                          "Verifying with subkey...".format(sig.signer), stacklevel=2)
            sigv &= self.subkeys[sig.signer].verify(subj, sig)
        else:
            verified = self._key.verify(sig.hashdata(subj), sig.__sig__, getattr(hashes, sig.hash_algorithm.name)())
            if verified is NotImplemented:
                # The backend does not support this key algorithm.
                raise NotImplementedError(sig.key_algorithm)
            sigv.add_sigsubj(sig, self.fingerprint.keyid, subj, verified)
    return sigv
def _to_narrow(self, terms, data, mask, dates, assets):
    """Convert raw computed pipeline results into a DataFrame for public APIs.

    Parameters
    ----------
    terms : dict[str -> Term]
        Dict mapping column names to terms.
    data : dict[str -> ndarray[ndim=2]]
        Dict mapping column names to computed results for those names.
    mask : ndarray[bool, ndim=2]
        Mask array of values to keep.
    dates : ndarray[datetime64, ndim=1]
        Row index for arrays `data` and `mask`.
    assets : ndarray[int64, ndim=2]
        Column index for arrays `data` and `mask`.

    Returns
    -------
    results : pd.DataFrame
        The indices of `results` are as follows:

        index : two-tiered MultiIndex of (date, asset).
            Contains an entry for each (date, asset) pair corresponding
            to a `True` value in `mask`.
        columns : Index of str
            One column per entry in `data`.

    If mask[date, asset] is True, then result.loc[(date, asset), colname]
    will contain the value of data[colname][date, asset].
    """
    if not mask.any():
        # Manually handle the empty DataFrame case. This is a workaround
        # to pandas failing to tz_localize an empty dataframe with a
        # MultiIndex. It also saves us the work of applying a known-empty
        # mask to each array.
        # Slicing `dates` here to preserve pandas metadata.
        empty_dates = dates[:0]
        empty_assets = array([], dtype=object)
        return DataFrame(
            data={name: array([], dtype=arr.dtype) for name, arr in iteritems(data)},
            index=MultiIndex.from_arrays([empty_dates, empty_assets]),
        )
    # Map raw sids to Asset objects for the output index.
    resolved_assets = array(self._finder.retrieve_all(assets))
    # Broadcast dates across columns and assets across rows, then keep
    # only the positions where mask is True.
    dates_kept = repeat_last_axis(dates.values, len(assets))[mask]
    assets_kept = repeat_first_axis(resolved_assets, len(dates))[mask]
    final_columns = {}
    for name in data:
        # Each term that computed an output has its postprocess method
        # called on the filtered result.
        # As of Mon May 2 15:38:47 2016, we only use this to convert
        # LabelArrays into categoricals.
        final_columns[name] = terms[name].postprocess(data[name][mask])
    return DataFrame(
        data=final_columns,
        index=MultiIndex.from_arrays([dates_kept, assets_kept]),
    ).tz_localize('UTC', level=0)
def sigres_path(self):
    """Absolute path of the SIGRES file. Empty string if file is not present."""
    # Lazy attribute to avoid multiple calls to has_abiext.
    try:
        return self._sigres_path
    except AttributeError:
        path = self.outdir.has_abiext("SIGRES")
        if path:
            # Only cache positive hits so the file can still be picked
            # up if it appears later.
            self._sigres_path = path
        # Return the (possibly empty) path instead of falling through
        # to an implicit None, matching the documented contract.
        return path
def hexbin(x, y, size, orientation="pointytop", aspect_scale=1):
    '''Perform an equal-weight binning of data points into hexagonal tiles.

    For more sophisticated use cases, e.g. weighted binning or scaling
    individual tiles proportional to some other quantity, consider using
    HoloViews.

    Args:
        x (array[float]):
            A NumPy array of x-coordinates for binning
        y (array[float]):
            A NumPy array of y-coordinates for binning
        size (float):
            The size of the hexagonal tiling. The size is defined as the
            distance from the center of a hexagon to the top corner for
            "pointytop" orientation, or from the center to a side corner
            for "flattop" orientation.
        orientation (str, optional):
            Whether the hex tile orientation should be "pointytop" or
            "flattop". (default: "pointytop")
        aspect_scale (float, optional):
            Match a plot's aspect ratio scaling. When working with a
            plot with ``aspect_scale != 1``, this parameter can be set
            to match the plot, in order to draw regular hexagons
            (instead of "stretched" ones). This is roughly equivalent to
            binning in "screen space", and it may be better to use
            axis-aligned rectangular bins when plot aspect scales are
            not one.

    Returns:
        DataFrame
            The resulting DataFrame will have columns *q* and *r* that
            specify hexagon tile locations in axial coordinates, and a
            column *counts* that provides the count for each tile.

    .. warning::
        Hex binning only functions on linear scales, i.e. not on log plots.
    '''
    pd = import_required('pandas', 'hexbin requires pandas to be installed')
    # Convert cartesian points to axial hex-tile coordinates, then count
    # how many points land in each tile.
    q, r = cartesian_to_axial(x, y, size, orientation, aspect_scale=aspect_scale)
    tiles = pd.DataFrame(dict(r=r, q=q))
    return tiles.groupby(['q', 'r']).size().reset_index(name='counts')
def add_coconut_to_path():
    """Add coconut to sys.path if it isn't there already."""
    try:
        import coconut  # NOQA
    except ImportError:
        # Two directory levels up from this file — presumably the
        # repository root containing the coconut package.
        repo_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        sys.path.insert(0, repo_root)
def isRunActive(g):
    """Poll the data server to see if a run is active."""
    if not g.cpars['hcam_server_on']:
        raise DriverError('isRunActive error: servers are not active')
    url = g.cpars['hipercam_server'] + 'summary'
    response = urllib.request.urlopen(url, timeout=2)
    rs = ReadServer(response.read(), status_msg=True)
    if not rs.ok:
        raise DriverError('isRunActive error: ' + str(rs.err))
    if rs.state == 'idle':
        return False
    if rs.state == 'active':
        return True
    # Any other state is unexpected.
    raise DriverError('isRunActive error, state = ' + rs.state)
def _add_function ( self , func , identify_observed ) :
"""Add a function as an observer .
Args :
func : The function to register as an observer .
identify _ observed : See docstring for add _ observer .
Returns :
True if the function is added , otherwise False .""" | key = self . make_key ( func )
if key not in self . observers :
self . observers [ key ] = ObserverFunction ( func , identify_observed , ( key , self . observers ) )
return True
else :
return False |
def findB(self, tag_name, params=None, fn=None, case_sensitive=False):
    """Same as :meth:`findAllB`, but with endtags filtered out.

    You can always get them from the :attr:`endtag` property.
    """
    matches = self.findAllB(tag_name, params, fn, case_sensitive)
    return [tag for tag in matches if not tag.isEndTag()]
def on_chanmsg(self, from_, channel, message):
    """Event handler for channel messages."""
    nick = from_[0]
    if message == 'hello':
        # Greet the sender back in the same channel.
        self.privmsg(channel, 'Hello, %s!' % nick)
        print('%s said hello!' % nick)
    elif message == '!quit':
        self.quit('Bye!')
def filter(self, source_file, encoding):  # noqa A001
    """Parse an XML file and return the filtered sources."""
    sources = []
    if encoding:
        # Known encoding: read the whole file directly.
        with codecs.open(source_file, 'r', encoding=encoding) as f:
            sources.extend(self._filter(f.read(), source_file, encoding))
    else:
        # Unknown encoding: let get_content detect it per chunk.
        for content, filename, enc in self.get_content(source_file):
            sources.extend(self._filter(content, source_file, enc))
    return sources
def get(self, pk):
    """Return the stored object whose primary key equals ``pk``.

    Override it for efficiency.
    """
    for candidate in self.store.get(self.query_class):
        # Coerce the lookup value to the primary-key column's type
        # before comparing.
        pk = candidate.properties[candidate.pk].col_type(pk)
        if getattr(candidate, candidate.pk) == pk:
            return candidate
def _generate_label_matrix(self):
    """Generate an [n, m] label matrix with entries in {0, ..., k}."""
    # L[i, j] is the label assigned to item i by labeling function j,
    # where 0 means "abstain"; Y[i] is the true class in {1, ..., k}.
    self.L = np.zeros((self.n, self.m))
    self.Y = np.zeros(self.n, dtype=np.int64)
    for i in range(self.n):
        # Draw the true label from the class prior p.
        y = choice(self.k, p=self.p) + 1
        # Note that y \in {1, ..., k}
        self.Y[i] = y
        for j in range(self.m):
            # Parent labeling function of j (0 when j has no parent entry).
            p_j = self.parent.get(j, 0)
            # Conditional probabilities of emitting the true label y and
            # of abstaining (0), given the parent's emitted label.
            prob_y = self.P_conditional(j, y, p_j, self.L[i, p_j], y)
            prob_0 = self.P_conditional(j, 0, p_j, self.L[i, p_j], y)
            # Remaining mass is spread uniformly over the k-1 wrong labels.
            p = np.ones(self.k + 1) * (1 - prob_y - prob_0) / (self.k - 1)
            p[0] = prob_0
            p[y] = prob_y
            self.L[i, j] = choice(self.k + 1, p=p)
def extract_attributes(cls, fields, resource):
    """Build the `attributes` object of the JSON API resource object."""
    data = OrderedDict()
    for field_name, field in six.iteritems(fields):
        # ID is always provided in the root of JSON API, so exclude it
        # from attributes.
        if field_name == 'id':
            continue
        # Write-only fields never get an output key.
        if field.write_only:
            continue
        # Relation fields are handled elsewhere.
        if isinstance(field, (relations.RelatedField, relations.ManyRelatedField, BaseSerializer)):
            continue
        # Skip read_only attribute fields when `resource` is an empty
        # serializer. Prevents the "Raw Data" form of the browsable API
        # from rendering `"foo": null` for read only fields.
        try:
            resource[field_name]
        except KeyError:
            if field.read_only:
                continue
        data[field_name] = resource.get(field_name)
    return utils._format_object(data)
def submission_delete(self, submission_id):
    """Delete a submission."""
    payload = {'apikey': self.apikey, 'submission_id': submission_id}
    response = self._post(self.apiurl + '/v2/submission/delete', data=payload)
    return self._raise_or_extract(response)
def set_calibration_reps(self, reps):
    """Set the number of repetitions for calibration stimuli.

    :param reps: Number of times a unique stimulus is presented in calibration operations
    :type reps: int
    """
    # Both calibrators must agree on the repetition count.
    for calibrator in (self.bs_calibrator, self.tone_calibrator):
        calibrator.set_reps(reps)
def cycle_dist(x, y, perimeter):
    """Find the shortest distance between x and y along a cycle (ring)
    of the given perimeter.

    Treats the axis as a ring and returns the shorter of the two arc
    lengths between the points.

    :param x: first position on the ring
    :param y: second position on the ring
    :param perimeter: total length of the ring
    :return: shortest along-ring distance between x and y

    Example (the original docstring wrote these as ``cycle_dist(...) = v``,
    which is not valid doctest syntax)::

        cycle_dist(1, 23, 24)    -> 2
        cycle_dist(5, 13, 24)    -> 8
        cycle_dist(0.0, 2.4, 1.0) -> 0.4 (up to float rounding)
        cycle_dist(0.0, 2.6, 1.0) -> 0.4 (up to float rounding)
    """
    dist = abs(x - y) % perimeter
    # Going the other way around the ring is shorter past the halfway point.
    if dist > 0.5 * perimeter:
        dist = perimeter - dist
    return dist
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.