signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def solve_venn3_circles(venn_areas):
    '''Given the list of "venn areas" (as output from compute_venn3_areas,
    i.e. [A, B, C, AB, BC, AC, ABC]), finds the positions and radii of the
    three circles.

    The return value is a tuple (coords, radii), where coords is a 3x2 array
    of coordinates and radii is a 3x1 array of circle radii.

    Assumes the input values to be nonnegative and not all zero.
    In particular, the first three values must all be positive.

    The overall match is only approximate (to be precise, what is matched are
    the areas of the circles and the three pairwise intersections).

    >>> c, r = solve_venn3_circles((1, 1, 1, 0, 0, 0, 0))
    >>> np.round(r, 3)
    array([ 0.564,  0.564,  0.564])
    >>> c, r = solve_venn3_circles(compute_venn3_areas((1, 2, 40, 30, 4, 40, 4)))
    >>> np.round(r, 3)
    array([ 0.359,  0.476,  0.453])
    '''
    (A_a, A_b, A_c, A_ab, A_bc, A_ac, A_abc) = list(map(float, venn_areas))
    # Radii chosen so that each circle's own area equals A_a / A_b / A_c.
    r_a, r_b, r_c = np.sqrt(A_a / np.pi), np.sqrt(A_b / np.pi), np.sqrt(A_c / np.pi)
    intersection_areas = [A_ab, A_bc, A_ac]
    radii = np.array([r_a, r_b, r_c])
    # Hypothetical distances between circle centers that assure
    # that their pairwise intersection areas match the requirements.
    dists = [find_distance_by_area(radii[i], radii[j], intersection_areas[i]) for (i, j) in [(0, 1), (1, 2), (2, 0)]]
    # How many intersections have nonzero area?
    num_nonzero = sum(np.array([A_ab, A_bc, A_ac]) > tol)
    # Handle four separate cases:
    #    1. All pairwise areas nonzero
    #    2. Two pairwise areas nonzero
    #    3. One pairwise area nonzero
    #    4. All pairwise areas zero.
    if num_nonzero == 3:
        # The "generic" case: use dists to position circles at the vertices
        # of a triangle.
        # Before that we need to ensure that the resulting circles can at all
        # be positioned on a triangle -- use an ad-hoc fix.
        for i in range(3):
            i, j, k = (i, (i + 1) % 3, (i + 2) % 3)
            if dists[i] > dists[j] + dists[k]:
                # Triangle inequality violated: shrink the offending side
                # (the 0.8 shrink factor is an ad-hoc choice).
                a, b = (j, k) if dists[j] < dists[k] else (k, j)
                dists[i] = dists[b] + dists[a] * 0.8
                warnings.warn("Bad circle positioning")
        coords = position_venn3_circles_generic(radii, dists)
    elif num_nonzero == 2:
        # One pair of circles is not intersecting.
        # In this case we can position all three circles in a line.
        # The two circles that have no intersection will be on either side.
        for i in range(3):
            if intersection_areas[i] < tol:
                (left, right, middle) = (i, (i + 1) % 3, (i + 2) % 3)
                coords = np.zeros((3, 2))
                coords[middle][0] = dists[middle]
                coords[right][0] = dists[middle] + dists[right]
                # We want to avoid the situation where left & right still intersect
                if coords[left][0] + radii[left] > coords[right][0] - radii[right]:
                    mid = (coords[left][0] + radii[left] + coords[right][0] - radii[right]) / 2.0
                    coords[left][0] = mid - radii[left] - 1e-5
                    coords[right][0] = mid + radii[right] + 1e-5
                break
    elif num_nonzero == 1:
        # Only one pair of circles is intersecting, and one circle is independent.
        # Position all on a line: first the two intersecting, then the free one.
        for i in range(3):
            if intersection_areas[i] > tol:
                (left, right, side) = (i, (i + 1) % 3, (i + 2) % 3)
                coords = np.zeros((3, 2))
                coords[right][0] = dists[left]
                # Pad by 10% so the free circle visibly does not touch.
                coords[side][0] = dists[left] + radii[right] + radii[side] * 1.1
                break
    else:
        # All circles are non-touching. Put them all in a sequence,
        # each padded by 10% of the following radius.
        coords = np.zeros((3, 2))
        coords[1][0] = radii[0] + radii[1] * 1.1
        coords[2][0] = radii[0] + radii[1] * 1.1 + radii[1] + radii[2] * 1.1
    # Recenter the layout around the combined center of mass.
    coords = normalize_by_center_of_mass(coords, radii)
    return (coords, radii)
def FloatStringToFloat(float_string, problems=None):
    """Convert a float represented as a string to a float, or raise an exception.

    :param float_string: string to convert; a non-string raises TypeError.
    :param problems: optional problem reporter; its ``InvalidFloatValue`` is
        called when Python can parse the value but it does not match the
        strict ``[+-]?digits[.digits]`` format (e.g. "1e3", "inf").
    :returns: the parsed float value.
    :raises ValueError: if the string cannot be parsed as a decimal float.
    """
    # Will raise TypeError unless given a string.
    match = re.match(r"^[+-]?\d+(\.\d+)?$", float_string)
    # Will raise ValueError if the string can't be parsed at all.
    parsed_value = float(float_string)
    if "x" in float_string:
        # Needed because Python 2.4 does not complain about float("0x20").
        # It does complain about float("0b10"), so this check is enough.
        # Fix: raise with a message instead of a bare ValueError() so the
        # failure is diagnosable.
        raise ValueError("invalid literal for a decimal float: %r" % (float_string,))
    if not match and problems is not None:
        # Parsed by Python but not a plain decimal float.
        problems.InvalidFloatValue(float_string)
    return parsed_value
def _add_edge ( self , layer , input_id , output_id ) :
"""Add a new layer to the graph . The nodes should be created in advance .""" | if layer in self . layer_to_id :
layer_id = self . layer_to_id [ layer ]
if input_id not in self . layer_id_to_input_node_ids [ layer_id ] :
self . layer_id_to_input_node_ids [ layer_id ] . append ( input_id )
if output_id not in self . layer_id_to_output_node_ids [ layer_id ] :
self . layer_id_to_output_node_ids [ layer_id ] . append ( output_id )
else :
layer_id = len ( self . layer_list )
self . layer_list . append ( layer )
self . layer_to_id [ layer ] = layer_id
self . layer_id_to_input_node_ids [ layer_id ] = [ input_id ]
self . layer_id_to_output_node_ids [ layer_id ] = [ output_id ]
self . adj_list [ input_id ] . append ( ( output_id , layer_id ) )
self . reverse_adj_list [ output_id ] . append ( ( input_id , layer_id ) ) |
def slices_to_layers(G_coupling, slice_attr='slice', vertex_id_attr='id', edge_type_attr='type', weight_attr='weight'):
    """Convert a coupling graph of slices to layers of graphs.

    The slices are represented by nodes in ``G_coupling`` and stored in the
    vertex attribute ``slice_attr``; i.e. ``G_coupling.vs[slice_attr]``
    should contain :class:`ig.Graph` objects. The slices are converted to
    layers, and nodes in different slices are coupled if the two slices are
    connected in ``G_coupling``. Nodes in two connected slices are identified
    on the basis of ``vertex_id_attr``: if two nodes in two connected slices
    have an identical value of ``vertex_id_attr`` they will be coupled, so
    ``vertex_id_attr`` must be unique within each slice. The weight of the
    coupling is the weight of the corresponding link in ``G_coupling``, as
    read from ``weight_attr``.

    Parameters
    ----------
    G_coupling : :class:`ig.Graph`
        The graph connecting the different slices.
    slice_attr : string
        The vertex attribute which contains the slices.
    vertex_id_attr : string
        The vertex attribute identifying the same node across slices.
    edge_type_attr : string
        The edge attribute used to indicate the type of link
        (``interslice`` or ``intraslice``).
    weight_attr : string
        The edge attribute used to indicate the (coupling) weight.

    Returns
    -------
    G_layers : list of :class:`ig.Graph`
        A list of slices converted to layers.
    G_interslice : :class:`ig.Graph`
        The interslice coupling layer.
    G : :class:`ig.Graph`
        The complete graph containing all layers and interslice couplings.

    Notes
    -----
    Slices are graphs that represent different aspects of a network (e.g.
    snapshots in time); layers are graphs defined on one common node set
    (node-slice combinations) where only the edges differ per layer. For
    methods such as :class:`CPMVertexPartition`, so-called ``node_sizes``
    are required; they are stored in the vertex attribute ``node_size``
    (1 for nodes belonging to a layer's own slice, 0 otherwise).

    References
    ----------
    .. [1] Mucha, P. J., Richardson, T., Macon, K., Porter, M. A., & Onnela,
       J.-P. (2010). Community structure in time-dependent, multiscale, and
       multiplex networks. Science, 328(5980), 876-8.
       `10.1126/science.1184819 <http://doi.org/10.1126/science.1184819>`_

    See Also
    --------
    :func:`find_partition_temporal`
    :func:`time_slices_to_layers`
    """
    if not slice_attr in G_coupling.vertex_attributes():
        raise ValueError("Could not find the vertex attribute {0} in the coupling graph.".format(slice_attr))
    if not weight_attr in G_coupling.edge_attributes():
        raise ValueError("Could not find the edge attribute {0} in the coupling graph.".format(weight_attr))
    # Create disjoint union of the time graphs
    for v_slice in G_coupling.vs:
        H = v_slice[slice_attr]
        # Tag every vertex of the slice graph with the index of its slice so
        # the vertices can be attributed after the disjoint union.
        H.vs[slice_attr] = v_slice.index
        if not vertex_id_attr in H.vertex_attributes():
            raise ValueError("Could not find the vertex attribute {0} to identify nodes in different slices.".format(vertex_id_attr))
        if not weight_attr in H.edge_attributes():
            # Default all intraslice edge weights to 1 when absent.
            H.es[weight_attr] = 1
    G = disjoint_union_attrs(G_coupling.vs[slice_attr])
    G.es[edge_type_attr] = 'intraslice'
    # Couple nodes with identical ids between every pair of connected slices.
    for v_slice in G_coupling.vs:
        for u_slice in v_slice.neighbors(mode=_ig.OUT):
            # Visit each undirected slice pair only once; for a directed
            # coupling graph every arc is handled.
            if v_slice.index < u_slice.index or G_coupling.is_directed():
                nodes_v = G.vs.select(lambda v: v[slice_attr] == v_slice.index)[vertex_id_attr]
                if len(set(nodes_v)) != len(nodes_v):
                    err = '\n'.join(['\t{0} {1} times'.format(item, count) for item, count in Counter(nodes_v).items() if count > 1])
                    raise ValueError('No unique IDs for slice {0}, require unique IDs:\n{1}'.format(v_slice.index, err))
                nodes_u = G.vs.select(lambda v: v[slice_attr] == u_slice.index)[vertex_id_attr]
                if len(set(nodes_u)) != len(nodes_u):
                    err = '\n'.join(['\t{0} {1} times'.format(item, count) for item, count in Counter(nodes_u).items() if count > 1])
                    raise ValueError('No unique IDs for slice {0}, require unique IDs:\n{1}'.format(u_slice.index, err))
                common_nodes = set(nodes_v).intersection(set(nodes_u))
                # Sort both sides by vertex id so zip pairs matching nodes.
                nodes_v = sorted([v for v in G.vs if v[slice_attr] == v_slice.index and v[vertex_id_attr] in common_nodes], key=lambda v: v[vertex_id_attr])
                nodes_u = sorted([v for v in G.vs if v[slice_attr] == u_slice.index and v[vertex_id_attr] in common_nodes], key=lambda v: v[vertex_id_attr])
                edges = zip(nodes_v, nodes_u)
                # Remember which edge ids the new interslice edges get.
                e_start = G.ecount()
                G.add_edges(edges)
                e_end = G.ecount()
                e_idx = range(e_start, e_end)
                interslice_weight = G_coupling.es[G_coupling.get_eid(v_slice, u_slice)][weight_attr]
                if not interslice_weight is None:
                    G.es[e_idx][weight_attr] = interslice_weight
                G.es[e_idx][edge_type_attr] = 'interslice'
    # Convert aggregate graph to individual layers for each time slice.
    G_layers = [None] * G_coupling.vcount()
    for v_slice in G_coupling.vs:
        # Keep every vertex but only the edges internal to this slice.
        H = G.subgraph_edges(G.es.select(_within=[v.index for v in G.vs if v[slice_attr] == v_slice.index]), delete_vertices=False)
        # node_size is 1 only for vertices that belong to this slice.
        H.vs['node_size'] = [1 if v[slice_attr] == v_slice.index else 0 for v in H.vs]
        G_layers[v_slice.index] = H
    # Create one graph for the interslice links.
    G_interslice = G.subgraph_edges(G.es.select(type_eq='interslice'), delete_vertices=False)
    G_interslice.vs['node_size'] = 0
    return G_layers, G_interslice, G
def depsignal(class_, signal_name, *, defer=False):
    """Connect the decorated (coroutine) method to the addressed signal on a
    class on which the service depends.

    :param class_: A service class which is listed in the
        :attr:`~.Meta.ORDER_AFTER` relationship.
    :type class_: :class:`Service` class or one of the special cases below
    :param signal_name: Attribute name of the signal to connect to
    :type signal_name: :class:`str`
    :param defer: Flag indicating whether deferred execution of the decorated
        method is desired; see below for details.
    :type defer: :class:`bool`

    The signal is discovered by accessing the attribute with the name
    `signal_name` on the given `class_`. In addition, the following arguments
    are supported for `class_`:

    1. :class:`aioxmpp.stream.StanzaStream`: the corresponding signal of the
       stream of the client running the service is used.
    2. :class:`aioxmpp.Client`: the corresponding signal of the client
       running the service is used.

    If the signal is a :class:`.callbacks.Signal` and `defer` is false, the
    decorated object is connected using the default
    :attr:`~.callbacks.AdHocSignal.STRONG` mode.

    If the signal is a :class:`.callbacks.Signal` and `defer` is true and the
    decorated object is a coroutine function, the
    :attr:`~.callbacks.AdHocSignal.SPAWN_WITH_LOOP` mode with the default
    asyncio event loop is used. If the decorated object is not a coroutine
    function, :attr:`~.callbacks.AdHocSignal.ASYNC_WITH_LOOP` is used
    instead.

    If the signal is a :class:`.callbacks.SyncSignal`, `defer` must be false
    and the decorated object must be a coroutine function.

    .. versionchanged:: 0.9

       Support for :class:`aioxmpp.stream.StanzaStream` and
       :class:`aioxmpp.Client` as `class_` argument was added.
    """
    def decorator(wrapped):
        # Record the handler spec on the function; the service machinery
        # performs the actual connection later.
        spec = _depsignal_spec(class_, signal_name, wrapped, defer)
        add_handler_spec(wrapped, spec)
        return wrapped
    return decorator
def setPixelColorRGB(self, n, red, green, blue):
    """Set LED at position n to the provided red, green, and blue color.

    Each color component should be a value from 0 to 255 (where 0 is the
    lowest intensity and 255 is the highest intensity).
    """
    packed = Color(red, green, blue)
    self.setPixelColor(n, packed)
def load_code(self, path, package, callwith):
    '''Used internally when loading code. You should probably use
    load_objects().

    Temporarily prepends ``path`` to ``sys.path``, imports ``package``, and
    registers every object returned by its ``get_objects(callwith)`` under
    the lower-cased object title in ``self.code``.
    '''
    sys.path = [path] + sys.path
    try:
        # Import while ``path`` is searchable.
        g_o = importlib.import_module(package).get_objects
    finally:
        # Fix: restore sys.path even when the import (or the get_objects
        # attribute lookup) fails, so a bad package cannot permanently
        # pollute the import path.
        del sys.path[0]
    for obj in g_o(callwith):
        self.code[obj.title.lower()] = obj
def clean_title(title):
    """Clean title -> remove dates, collapse duplicated spaces and strip.

    Args:
        title (str): Title.

    Returns:
        str: Clean title without dates, duplicated, trailing and leading
        spaces.
    """
    # Matches dates like 1/2/2003, 01-02-03 or 1.2.2003, including any
    # surrounding non-word characters.
    date_re = re.compile(
        r'\W*'
        r'\d{1,2}'
        r'[/\-.]'
        r'\d{1,2}'
        r'[/\-.]'
        r'(?=\d*)(?:.{4}|.{2})'
        r'\W*'
    )
    without_dates = date_re.sub(' ', title)
    single_spaced = re.sub(r'\s{2,}', ' ', without_dates)
    return single_spaced.strip()
def _cryptography_cipher(key, iv):
    """Build a cryptography AES Cipher object (AES in CFB mode).

    :param bytes key: Encryption key
    :param bytes iv: Initialization vector
    :returns: AES Cipher instance
    :rtype: cryptography.hazmat.primitives.ciphers.Cipher
    """
    return Cipher(
        algorithm=algorithms.AES(key),
        mode=modes.CFB(iv),
        backend=default_backend(),
    )
def colorbrewer(values, alpha=255):
    """Return a dict mapping each unique value to an RGBA color list.

    Colors are adapted from Harrower, Mark, and Cynthia A. Brewer.
    "ColorBrewer.org: an online tool for selecting colour schemes for maps."
    The Cartographic Journal 40.1 (2003): 27-37.

    :param values: values
    :param alpha: color alpha
    :return: dict of colors for the unique values.
    """
    basecolors = [
        [31, 120, 180], [178, 223, 138], [51, 160, 44], [251, 154, 153],
        [227, 26, 28], [253, 191, 111], [255, 127, 0], [202, 178, 214],
        [106, 61, 154], [255, 255, 153], [177, 89, 40],
    ]
    # Cycle through the base palette for however many unique values exist.
    palette = {}
    for index, value in enumerate(list(set(values))):
        palette[value] = basecolors[index % len(basecolors)] + [alpha]
    return palette
def request_name(self, name):
    """Request a name; return it, or a suffixed variant if the name is
    already used or reserved."""
    candidate = name
    # Append underscores until the candidate is free.
    while candidate in self._blacklist:
        candidate = candidate + "_"
    # Reserve the granted name for future requests.
    self._blacklist.add(candidate)
    return candidate
def writeJsonZipfile(filelike, data, compress=True, mode='w', name='data'):
    """Serializes the objects contained in data to a JSON formatted string
    and writes it to a zipfile.

    :param filelike: path to a file (str) or a file-like object
    :param data: object that should be converted to a JSON formatted string.
        Objects and types in data must be supported by the json.JSONEncoder
        or have the method ``._reprJSON()`` defined.
    :param compress: bool, True to use zip file compression
    :param mode: 'w' to truncate and write a new file, or 'a' to append to an
        existing file
    :param name: the file name that will be given to the JSON output in the
        archive
    """
    zipcomp = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
    # allowZip64 permits archives larger than 2 GiB.
    with zipfile.ZipFile(filelike, mode, allowZip64=True) as containerFile:
        containerFile.writestr(name, json.dumps(data, cls=MaspyJsonEncoder), zipcomp)
def concat_ws(sep, *cols):
    """Concatenates multiple input string columns together into a single
    string column, using the given separator.

    :param sep: separator string placed between the column values
    :param cols: input columns (or column names) to concatenate

    >>> df = spark.createDataFrame([('abcd', '123')], ['s', 'd'])
    >>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
    [Row(s=u'abcd-123')]
    """
    sc = SparkContext._active_spark_context
    # Delegate to the JVM-side Spark SQL function of the same name.
    return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column)))
def find_for_x_in_y_keys(node):
    """Return True when ``node`` is a for-loop iterating over ``.keys()``."""
    if not isinstance(node, ast.For):
        return False
    return h.call_name_is(node.iter, 'keys')
def visit_Dict(self, node: ast.Dict) -> Dict[Any, Any]:
    """Visit keys and values and assemble a dictionary with the results."""
    result = dict()  # type: Dict[Any, Any]
    for key_node, value_node in zip(node.keys, node.values):
        # Visit the value first, then the key, matching the evaluation
        # order of ``d[visit(key)] = visit(value)`` (RHS before subscript).
        recomputed_value = self.visit(node=value_node)
        result[self.visit(node=key_node)] = recomputed_value
    # Cache the recomputation for this AST node before returning.
    self.recomputed_values[node] = result
    return result
def environment_does_base_variable_exist(self, name):
    """Checks if the given environment variable exists in the session's base
    environment (:py:func:`IGuestSession.environment_base`).

    in name of type str
        Name of the environment variable to look for. This cannot be
        empty nor can it contain any equal signs.

    return exists of type bool
        TRUE if the variable exists, FALSE if not.

    raises :class:`VBoxErrorNotSupported`
        If the guest additions do not support the session base environment
        feature. Support for this was introduced with protocol version XXXX.

    raises :class:`VBoxErrorInvalidObjectState`
        If the guest additions have yet to report the session base
        environment.
    """
    # NOTE(review): `basestring` exists only on Python 2 (or via a compat
    # shim defined elsewhere in this module) -- confirm before running on
    # Python 3.
    if not isinstance(name, basestring):
        raise TypeError("name can only be an instance of type basestring")
    # Forward to the underlying VirtualBox API call.
    exists = self._call("environmentDoesBaseVariableExist", in_p=[name])
    return exists
def agent(self):
    """This method returns the agent name, or None if it cannot be parsed.

    :return: agent name string, or None on any parsing failure.
    """
    try:
        if self._data_from_search:
            # Search-result snippet: the agent appears in the "links" list,
            # formatted like "Agent: <name>".
            agent = self._data_from_search.find('ul', {'class': 'links'}).text
            return agent.split(':')[1].strip()
        else:
            # Full ad page: the branded agent link holds the name directly.
            return self._ad_page_content.find('a', {'id': 'smi-link-branded'}).text.strip()
    except Exception as e:
        if self._debug:
            # Fix: use str(e) instead of e.args[0] -- args may be empty or
            # contain a non-string, which would raise a second exception
            # while logging and mask the original failure.
            logging.error("Error getting agent. Error message: " + str(e))
        return
def connection(self):
    """A context manager that returns a connection to the server using some
    *session*.

    Each middleware is applied to the connection before it is yielded; the
    connection is always torn down afterwards.
    """
    conn = self.session(**self.options)
    try:
        for middleware in self.middlewares:
            middleware(conn)
        yield conn
    finally:
        # Guarantee cleanup even if a middleware or the caller raises.
        conn.teardown()
def smoothed_moving_average(data, period):
    """Smoothed Moving Average.

    Formula:
    smma = avg(data(n)) - avg(data(n)/n) + data(t)/n

    :param data: sequence of numeric values
    :param period: smoothing period n (used as alpha = 1/n)
    :return: flattened numpy array of smoothed values
    """
    # Validates that the period fits the data; raises on error.
    catch_errors.check_for_period_error(data, period)
    series = pd.Series(data)
    # Exponentially-weighted mean with alpha = 1/period implements the
    # SMMA recursion.
    return series.ewm(alpha=1.0 / period).mean().values.flatten()
def find(self, cell_designation, cell_filter=lambda x, c: 'c' in x and x['c'] == c):
    """Find spike containers in multi spike containers collection offspring.

    Delegates to the parent container recorded in ``self.meta``; returns
    ``(parent, parent.find(...))`` when a parent exists, ``None`` otherwise.
    """
    if 'parent' not in self.meta:
        return None
    parent = self.meta['parent']
    return (parent, parent.find(cell_designation, cell_filter=cell_filter))
def hex_colors(self):
    """Colors rendered as hex strings (e.g. '#A912F4'), one per color."""
    # '{:02X}' zero-pads each component to two uppercase hex digits,
    # equivalent to padding hex(c)[2:].upper() by hand.
    return [
        '#' + ''.join('{0:02X}'.format(component) for component in color)
        for color in self.colors
    ]
def squelch(self, threshold):
    """Set all records that do not exceed the given threshold to 0.

    Parameters
    ----------
    threshold : scalar
        Level below which records are replaced with all-zero arrays.
    """
    def _squelch_record(record):
        # A record survives only if its maximum reaches the threshold.
        if max(record) < threshold:
            return zeros(record.shape)
        return record
    return self.map(_squelch_record)
def SetLowerTimestamp(cls, timestamp):
    """Sets the lower bound timestamp, keeping the minimum value seen."""
    if hasattr(cls, '_lower'):
        # Keep whichever bound is smaller.
        cls._lower = min(cls._lower, timestamp)
    else:
        # First observation establishes the bound.
        cls._lower = timestamp
def generate_data_for_env_problem(problem_name):
    """Generate data for `EnvProblem`s.

    :param problem_name: registry name of the env problem to generate
        data for.

    Reads FLAGS: env_problem_max_env_steps, env_problem_batch_size,
    task_id, data_dir, tmp_dir.
    """
    assert FLAGS.env_problem_max_env_steps > 0, ("--env_problem_max_env_steps " "should be greater than zero")
    assert FLAGS.env_problem_batch_size > 0, ("--env_problem_batch_size should be" " greather than zero")
    problem = registry.env_problem(problem_name)
    # A negative task_id flag means "no task id".
    task_id = None if FLAGS.task_id < 0 else FLAGS.task_id
    data_dir = os.path.expanduser(FLAGS.data_dir)
    tmp_dir = os.path.expanduser(FLAGS.tmp_dir)
    # TODO(msaffar): Handle large values for env_problem_batch_size where we
    # cannot create that many environments within the same process.
    problem.initialize(batch_size=FLAGS.env_problem_batch_size)
    # Collect episodes with a random policy, then serialize them.
    env_problem_utils.play_env_problem_randomly(problem, num_steps=FLAGS.env_problem_max_env_steps)
    problem.generate_data(data_dir=data_dir, tmp_dir=tmp_dir, task_id=task_id)
def _ls_print_summary(all_trainings: List[Tuple[str, dict, TrainingTrace]]) -> None:
    """Print trainings summary.

    In particular, print tables summarizing the number of trainings with
    - particular model names
    - particular combinations of models and datasets

    :param all_trainings: a list of training tuples
        (train_dir, configuration dict, trace)
    """
    counts_by_name = defaultdict(int)
    counts_by_classes = defaultdict(int)
    # Tally trainings per model name and per (model, dataset) class pair.
    for _, config, _ in all_trainings:
        counts_by_name[get_model_name(config)] += 1
        counts_by_classes[get_classes(config)] += 1
    print_boxed('summary')
    print()
    counts_table = [[name, count] for name, count in counts_by_name.items()]
    print(tabulate(counts_table, headers=['model.name', 'count'], tablefmt='grid'))
    print()
    # NOTE(review): indexing classes[0]/classes[1] implies get_classes()
    # returns a (model class, dataset class) pair -- confirm at its
    # definition.
    counts_table = [[classes[0], classes[1], count] for classes, count in counts_by_classes.items()]
    print(tabulate(counts_table, headers=['model.class', 'dataset.class', 'count'], tablefmt='grid'))
    print()
def convert_objects(self, convert_dates=True, convert_numeric=False, convert_timedeltas=True, copy=True):
    """Attempt to infer better dtype for object columns.

    .. deprecated:: 0.21.0

    Parameters
    ----------
    convert_dates : boolean, default True
        If True, convert to date where possible. If 'coerce', force
        conversion, with unconvertible values becoming NaT.
    convert_numeric : boolean, default False
        If True, attempt to coerce to numbers (including strings), with
        unconvertible values becoming NaN.
    convert_timedeltas : boolean, default True
        If True, convert to timedelta where possible. If 'coerce', force
        conversion, with unconvertible values becoming NaT.
    copy : boolean, default True
        If True, return a copy even if no copy is necessary (e.g. no
        conversion was done). Note: This is meant for internal use, and
        should not be confused with inplace.

    Returns
    -------
    converted : same as input object

    See Also
    --------
    to_datetime : Convert argument to datetime.
    to_timedelta : Convert argument to timedelta.
    to_numeric : Convert argument to numeric type.
    """
    # Emit the deprecation warning, pointing at the caller (stacklevel=2).
    msg = ("convert_objects is deprecated. To re-infer data dtypes for " "object columns, use {klass}.infer_objects()\nFor all " "other conversions use the data-type specific converters " "pd.to_datetime, pd.to_timedelta and pd.to_numeric.").format(klass=self.__class__.__name__)
    warnings.warn(msg, FutureWarning, stacklevel=2)
    # Delegate to the internal block manager and re-wrap the result,
    # propagating metadata via __finalize__.
    return self._constructor(self._data.convert(convert_dates=convert_dates, convert_numeric=convert_numeric, convert_timedeltas=convert_timedeltas, copy=copy)).__finalize__(self)
def role_list(endpoint_id):
    """Executor for `globus endpoint role list`.

    Fetches the roles on the endpoint, resolves identity principals to
    usernames where possible, and prints a formatted table.
    """
    client = get_client()
    roles = client.endpoint_role_list(endpoint_id)
    # Batch-resolve identity IDs to usernames; resolution is lazy.
    resolved_ids = LazyIdentityMap(x["principal"] for x in roles if x["principal_type"] == "identity")
    def principal_str(role):
        # Render the principal column: a username for identities (falling
        # back to the raw ID), a web URL for groups, raw value otherwise.
        principal = role["principal"]
        if role["principal_type"] == "identity":
            username = resolved_ids.get(principal)
            return username or principal
        elif role["principal_type"] == "group":
            return (u"https://app.globus.org/groups/{}").format(principal)
        else:
            return principal
    formatted_print(roles, fields=[("Principal Type", "principal_type"), ("Role ID", "id"), ("Principal", principal_str), ("Role", "role"), ], )
def get_latex_name(func_in, **kwargs):
    """Produce a latex formatted name for each function for use in labelling
    results.

    Parameters
    ----------
    func_in: function
    kwargs: dict, optional
        Kwargs for function.

    Returns
    -------
    latex_name: str
        Latex formatted name for the function.
    """
    # Unwrap functools.partial objects, merging their keywords into kwargs.
    if isinstance(func_in, functools.partial):
        func = func_in.func
        assert not set(func_in.keywords) & set(kwargs), (
            'kwargs={0} and func_in.keywords={1} contain repeated keys'
            .format(kwargs, func_in.keywords))
        kwargs.update(func_in.keywords)
    else:
        func = func_in
    param_ind = kwargs.pop('param_ind', 0)
    probability = kwargs.pop('probability', 0.5)
    kwargs.pop('handle_indexerror', None)
    if kwargs:
        raise TypeError('Unexpected **kwargs: {0}'.format(kwargs))
    # 1-based parameter subscript, e.g. {\hat{1}} for param_ind=0.
    subscript = r'{\hat{' + str(param_ind + 1) + '}}'
    names = {
        'count_samples': r'samples',
        'logz': r'$\mathrm{log} \mathcal{Z}$',
        'evidence': r'$\mathcal{Z}$',
        'r_mean': r'$\overline{|\theta|}$',
        'param_mean': r'$\overline{\theta_' + subscript + '}$',
        'param_squared_mean': r'$\overline{\theta^2_' + subscript + '}$',
    }
    # Credible-interval names depend on the requested probability.
    if probability == 0.5:
        cred_prefix = r'$\mathrm{median}('
    else:
        # Format the percentage without trailing zeros.
        percent_str = ('%f' % (probability * 100)).rstrip('0').rstrip('.')
        cred_prefix = r'$\mathrm{C.I.}_{' + percent_str + r'\%}('
    names['param_cred'] = cred_prefix + r'\theta_' + subscript + ')$'
    names['r_cred'] = cred_prefix + r'|\theta|)$'
    try:
        return names[func.__name__]
    except KeyError as err:
        # Annotate the KeyError so the unknown function name is reported.
        err.args = err.args + (
            'get_latex_name not yet set up for ' + func.__name__,)
        raise
def _bind_indirect_user ( self , ldap , con ) :
"""If using AUTH _ LDAP _ BIND _ USER bind this user before performing search
: param ldap : The ldap module reference
: param con : The ldap connection""" | indirect_user = self . auth_ldap_bind_user
if indirect_user :
indirect_password = self . auth_ldap_bind_password
log . debug ( "LDAP indirect bind with: {0}" . format ( indirect_user ) )
con . bind_s ( indirect_user , indirect_password )
log . debug ( "LDAP BIND indirect OK" ) |
def is_valid(self):
    """Check integrity and validity of this frame.

    :return: bool True if this frame is structurally valid.
    """
    if self.protocol_id != 0:
        # Modbus always uses protocol 0.
        return False
    if not (2 <= self.length <= 260):
        # Absolute length limits.
        return False
    # Declared length must match the actual payload length (+2 header bytes).
    return len(self.data) == self.length - 2
def from_dict(cls, d):
    """Instantiate a Variable from a dictionary representation."""
    var_type = d['type']
    parents = tuple(d['parents'])
    properties = list(d['properties'].items())
    return cls(var_type, parents, properties)
def request_uplink_info(self, payload):
    """Get the uplink from the database and send the info to the agent.

    :param payload: dict-like message from the agent; read for its
        'agent' key.
    """
    # This request is received from an agent when it runs for the first
    # time. Send the uplink name (physical port name that connects the
    # compute node and switch fabric).
    agent = payload.get('agent')
    config_res = self.get_agent_configurations(agent)
    LOG.debug('configurations on %(agent)s is %(cfg)s', ({'agent': agent, 'cfg': config_res}))
    try:
        self.neutron_event.send_msg_to_agent(agent, constants.UPLINK_NAME, config_res)
    except (rpc.MessagingTimeout, rpc.RPCException, rpc.RemoteError):
        # Best-effort delivery: log the RPC failure and move on.
        LOG.error("RPC error: Failed to send uplink name to agent.")
def string_tokenizer(self, untokenized_string: str, include_blanks=False):
    """Split a string into a list of cleaned lines; based off CLTK's line
    tokenizer. Use this for strings rather than .txt files.

    input: '20. u2-sza-bi-la-kum\n1. a-na ia-as2-ma-ah-{d}iszkur#\n2. qi2-bi2-ma\n3. um-ma {d}utu-szi-{d}iszkur\n'
    output: ['20. u2-sza-bi-la-kum', '1. a-na ia-as2-ma-ah-{d}iszkur#', '2. qi2-bi2-ma']

    :param untokenized_string: string to tokenize
    :param include_blanks: keep instances of empty lines when True
    :return: lines as strings in list
    """
    assert isinstance(untokenized_string, str), 'Incoming argument must be a string.'
    if include_blanks:
        tokenized_lines = untokenized_string.splitlines()
    else:
        # Fix: the previous filter compared lines against the literal
        # r'\\n', which splitlines() never produces, so blank lines were
        # always kept. Compare against the empty string instead.
        tokenized_lines = [line for line in untokenized_string.splitlines() if line != '']
    line_output = []
    for line in tokenized_lines:
        if not self.damage:
            # Strip out damage characters ('xn' -- missing sign or number?).
            line = ''.join(c for c in line if c not in "#[]?!*")
        # (Removed a dead `re.match(...)` whose result was never used.)
        line_output.append(line.rstrip())
    return line_output
def _resolve_call ( self , table , column = '' , value = '' , ** kwargs ) :
"""Internal method to resolve the API wrapper call .""" | if not column :
return self . catalog ( table )
elif not value :
return self . catalog ( table , column )
# We have all the table , column , and value , and now need to
# ensure they ' re all strings and uppercase .
column = column . upper ( )
value = str ( value ) . upper ( )
data = self . call_api ( table , column , value , ** kwargs )
if isinstance ( data , dict ) : # Data is actually the first value .
data = data . values ( ) [ 0 ]
return data |
def main():
    """
    NAME
        lnp_magic.py

    DESCRIPTION
        makes equal area projections site by site
        from specimen formatted file with
        Fisher confidence ellipse using McFadden and McElhinny (1988)
        technique for combining lines and planes

    SYNTAX
        lnp_magic [command line options]

    INPUT
        takes magic formatted specimens file

    OUTPUT
        prints site_name n_lines n_planes K alpha95 dec inc R

    OPTIONS
        -h prints help message and quits
        -f FILE: specify input file, default is 'specimens.txt',
            ('pmag_specimens.txt' for legacy data model 2)
        -fsa FILE: specify samples file, required to plot by site for data
            model 3 (otherwise will plot by sample), default is 'samples.txt'
        -crd [s,g,t]: specify coordinate system, [s]pecimen, [g]eographic,
            [t]ilt adjusted; default is specimen
        -fmt [svg,png,jpg] format for plots, default is svg
        -sav save plots and quit
        -P: do not plot
        -F FILE, specify output file of dec, inc, alpha95 data for plotting
            with plotdi_a and plotdi_e
        -exc use criteria in criteria table  # NOT IMPLEMENTED
        -DM NUMBER MagIC data model (2 or 3, default 3)
    """
    if '-h' in sys.argv:
        print(main.__doc__)
        sys.exit()
    dir_path = pmag.get_named_arg("-WD", ".")
    # Data model 3 is the default; "-DM 2" selects the legacy format.
    data_model = int(float(pmag.get_named_arg("-DM", 3)))
    fmt = pmag.get_named_arg("-fmt", 'svg')
    if data_model == 2:
        in_file = pmag.get_named_arg('-f', 'pmag_specimens.txt')
        crit_file = "pmag_criteria.txt"
    else:
        in_file = pmag.get_named_arg('-f', 'specimens.txt')
        samp_file = pmag.get_named_arg('-fsa', 'samples.txt')
        crit_file = "criteria.txt"
    in_file = pmag.resolve_file_name(in_file, dir_path)
    # Work in the directory that actually contains the input file.
    dir_path = os.path.split(in_file)[0]
    if data_model == 3:
        samp_file = pmag.resolve_file_name(samp_file, dir_path)
    if '-crd' in sys.argv:
        ind = sys.argv.index("-crd")
        crd = sys.argv[ind + 1]
        if crd == 's':
            coord = "-1"
        if crd == 'g':
            coord = "0"
        if crd == 't':
            coord = "100"
    else:
        # NOTE(review): when -crd is absent, `crd` itself is never bound
        # but is used below to build plot file names — confirm intended.
        coord = "-1"
    out_file = pmag.get_named_arg('-F', '')
    if out_file:
        out = open(dir_path + '/' + out_file, 'w')
    if '-P' in sys.argv:
        make_plots = 0
        # do not plot
    else:
        make_plots = 1
        # do plot
    if '-sav' in sys.argv:
        plot = 1
        # save plots and quit
    else:
        plot = 0
        # show plots interactively (if make_plots)
    if data_model == 2:
        Specs, file_type = pmag.magic_read(in_file)
        if 'specimens' not in file_type:
            print('Error opening ', in_file, file_type)
            sys.exit()
    else:
        # Data model 3: read specimens + samples and propagate the site
        # name down so specimens can be grouped by site.
        fnames = {'specimens': in_file, 'samples': samp_file}
        con = cb.Contribution(dir_path, read_tables=['samples', 'specimens'],
                              custom_filenames=fnames)
        con.propagate_name_down('site', 'specimens')
        if 'site' in con.tables['specimens'].df.columns:
            site_col = 'site'
        else:
            site_col = 'sample'
        tilt_corr_col = "dir_tilt_correction"
        mad_col = "dir_mad_free"
        alpha95_col = "dir_alpha95"
        site_alpha95_col = "dir_alpha95"
        dec_col = "dir_dec"
        inc_col = "dir_inc"
        num_meas_col = "dir_n_measurements"
        k_col = "dir_k"
        cols = [site_col, tilt_corr_col, mad_col, alpha95_col, dec_col, inc_col]
        con.tables['specimens'].front_and_backfill(cols)
        con.tables['specimens'].df = con.tables['specimens'].df.where(
            con.tables['specimens'].df.notnull(), "")
        Specs = con.tables['specimens'].convert_to_pmag_data_list()
    # Using a criteria file (-exc / crit_file) was never fully implemented.
    sitelist = []
    # initialize some variables
    FIG = {}
    # plot dictionary
    FIG['eqarea'] = 1
    # eqarea is figure 1
    # Defaults mean "no criteria filtering": accept any MAD/alpha95/K.
    M, N, acutoff, kcutoff = 180., 1, 180., 0.
    if data_model == 2:
        site_col = 'er_site_name'
        tilt_corr_col = "specimen_tilt_correction"
        mad_col = "specimen_mad"
        alpha95_col = 'specimen_alpha95'
        dec_col = "specimen_dec"
        inc_col = "specimen_inc"
        num_meas_col = "specimen_n"
        site_alpha95_col = "site_alpha95"
    else:  # data model 3: column names were already set above
        pass
    for rec in Specs:
        if rec[site_col] not in sitelist:
            sitelist.append(rec[site_col])
    sitelist.sort()
    if make_plots == 1:
        EQ = {}
        EQ['eqarea'] = 1
    for site in sitelist:
        # NOTE(review): EQ is only created when make_plots == 1, yet
        # plot_init runs unconditionally here — confirm no-plot mode.
        pmagplotlib.plot_init(EQ['eqarea'], 4, 4)
        print(site)
        data = []
        for spec in Specs:
            if tilt_corr_col not in list(spec.keys()):
                spec[tilt_corr_col] = '-1'
                # assume unoriented
            if spec[site_col] == site:
                # Fall back to alpha95 (or a pass-everything 180) when
                # the MAD column is missing or empty.
                if mad_col not in list(spec.keys()) or spec[mad_col] == "":
                    if alpha95_col in list(spec.keys()) and spec[alpha95_col] != "":
                        spec[mad_col] = spec[alpha95_col]
                    else:
                        spec[mad_col] = '180'
                if not spec[num_meas_col]:
                    continue
                # Keep specimens in the requested coordinate system that
                # pass the (default, permissive) MAD and N cutoffs.
                if (float(spec[tilt_corr_col]) == float(coord)) and (float(spec[mad_col]) <= M) and (float(spec[num_meas_col]) >= N):
                    rec = {}
                    for key in list(spec.keys()):
                        rec[key] = spec[key]
                    rec["dec"] = float(spec[dec_col])
                    rec["inc"] = float(spec[inc_col])
                    rec["tilt_correction"] = spec[tilt_corr_col]
                    data.append(rec)
        if len(data) > 2:
            # Combine lines and planes (McFadden & McElhinny 1988).
            fpars = pmag.dolnp(data, 'specimen_direction_type')
            print("Site lines planes kappa a95 dec inc")
            print(site, fpars["n_lines"], fpars["n_planes"], fpars["K"],
                  fpars["alpha95"], fpars["dec"], fpars["inc"], fpars["R"])
            if out_file != "":
                if float(fpars["alpha95"]) <= acutoff and float(fpars["K"]) >= kcutoff:
                    out.write('%s %s %s\n' % (fpars["dec"], fpars['inc'], fpars['alpha95']))
            print('% tilt correction: ', coord)
            if make_plots == 1:
                files = {}
                files['eqarea'] = site + '_' + crd + '_' + 'eqarea' + '.' + fmt
                pmagplotlib.plot_lnp(EQ['eqarea'], site, data, fpars,
                                     'specimen_direction_type')
                if plot == 0:
                    pmagplotlib.draw_figs(EQ)
                    ans = input("s[a]ve plot, [q]uit, <return> to continue:\n ")
                    if ans == "a":
                        pmagplotlib.save_plots(EQ, files)
                    if ans == "q":
                        sys.exit()
                else:
                    pmagplotlib.save_plots(EQ, files)
        else:
            print('skipping site - not enough data with specified coordinate system')
def _log ( self , level , fmt , args = None , extra = None , exc_info = None , inc_stackinfo = False , inc_multiproc = False ) :
"""Send a log message to all of the logging functions
for a given level as well as adding the
message to this logger instance ' s history .""" | if not self . enabled :
return
# Fail silently so that logging can easily be removed
log_record = self . _make_record ( level , fmt , args , extra , exc_info , inc_stackinfo , inc_multiproc )
logstr = log_record [ 'defaultfmt' ] . format ( ** log_record )
# whoah .
if self . keep_history :
self . history . append ( logstr )
log_funcs = self . config [ level ]
to_remove = [ ]
for lf in log_funcs :
try :
lf . send ( logstr )
except StopIteration : # in the case that the log function is already closed , add it
# to the list of functions to be deleted .
to_remove . append ( lf )
for lf in to_remove :
self . remove ( level , lf )
self . info ( 'Logging function {} removed from level {}' , lf , level ) |
def packet2chain(packet):
    """Return the colon-joined chain of protocol-layer names of a Scapy packet."""
    if scapy_all is None:
        raise ModuleNotFound("No module named 'scapy'", name='scapy')
    names = [packet.name]
    layer = packet.payload
    # Walk the payload chain until Scapy's NoPayload sentinel terminates it.
    while not isinstance(layer, scapy_all.packet.NoPayload):
        names.append(layer.name)
        layer = layer.payload
    return ':'.join(names)
def Line(pointa=(-0.5, 0., 0.), pointb=(0.5, 0., 0.), resolution=1):
    """Create a line as VTK polydata.

    Parameters
    ----------
    pointa : np.ndarray or list
        Start location in [x, y, z].
    pointb : np.ndarray or list
        End location in [x, y, z].
    resolution : int
        Number of pieces to divide the line into.
    """
    # Validate both endpoints before touching VTK.
    for label, point in (('A', pointa), ('B', pointb)):
        if np.array(point).size != 3:
            raise TypeError('Point %s must be a length three tuple of floats.' % label)
    source = vtk.vtkLineSource()
    source.SetPoint1(*pointa)
    source.SetPoint2(*pointb)
    source.SetResolution(resolution)
    source.Update()
    return vtki.wrap(source.GetOutput())
async def pop_transaction_async(self):
    """Decrement the async transaction depth.

    When the depth reaches zero the task's connection is released back
    to the pool.  Raises ``ValueError`` if there is no transaction open.
    """
    depth = self.transaction_depth_async()
    if depth <= 0:
        raise ValueError("Invalid async transaction depth value")
    depth -= 1
    self._task_data.set('depth', depth)
    if depth == 0:
        # Outermost transaction finished: hand the connection back.
        self._async_conn.release(self._task_data.get('conn'))
def hex2pub(pub_hex: str) -> PublicKey:
    """Convert an ethereum public-key hex string to a coincurve PublicKey.

    Ethereum serializes secp256k1 public keys as 64 raw bytes; the
    uncompressed wire format expects 65 bytes with a leading ``\\x04``
    marker, which is prepended here when missing.

    Parameters
    ----------
    pub_hex : str
        Ethereum public key hex string.

    Returns
    -------
    coincurve.PublicKey
        The secp256k1 public key derived from *pub_hex*.
    """
    raw = decode_hex(pub_hex)
    prefixed = b"\x04" + raw if len(raw) == 64 else raw
    return PublicKey(prefixed)
def read_raw(self, length, *, error=None):
    """Read raw packet data of *length* bytes (the whole packet when None)."""
    size = len(self) if length is None else length
    return dict(packet=self._read_fileng(size), error=error or None)
def export(self, new_format, filename=None, chan=None, begtime=None, endtime=None):
    """Export the current dataset to another format.

    Parameters
    ----------
    new_format : str
        Format for the exported record: 'edf' or 'wonambi'.
    filename : str or PosixPath
        Filename to export to; defaults to the dataset's own filename.
    chan : list of str, opt
        Original channel names to export; all channels when None.
    begtime : int or timedelta or datetime
        Start of the data to read (seconds, offset from recording start,
        or absolute time respectively).
    endtime : int or timedelta or datetime
        End of the data to read (same conventions as *begtime*).
    """
    dataset = self.dataset
    subj_id = dataset.header['subj_id']
    target = dataset.filename if filename is None else filename
    data = dataset.read_data(chan=chan, begtime=begtime, endtime=endtime)
    if new_format == 'wonambi':
        write_wonambi(data, target, subj_id=subj_id)
    elif new_format == 'edf':
        write_edf(data, target, subj_id=subj_id)
    else:
        self.parent.statusBar().showMessage('Format unrecognized.')
def fromInputs(self, inputs):
    """Extract this parameter's inputs from *inputs* and coerce them.

    @type inputs: C{dict} mapping C{unicode} to C{list} of C{unicode}
    @param inputs: The contents of a form post, in the conventional
        structure.

    @rtype: L{Deferred}
    @return: The structured data associated with this parameter
        represented by the post data.
    """
    if self.name not in inputs:
        raise ConfigurationError("Missing value for input: " + self.name)
    return self.coerceMany(inputs[self.name])
def save_file(self, path: str, file_id: int = None, file_part: int = 0, progress: callable = None, progress_args: tuple = ()):
    """Upload a file onto Telegram servers without sending it to anyone.

    This is a utility method intended to be used **only** when working
    with Raw Functions (i.e. a Telegram API method not yet wrapped by
    the Client class), whenever an InputFile type is required.

    Args:
        path (``str``):
            The path of the local file to upload.
        file_id (``int``, *optional*):
            In case a file part expired, pass the file_id and the
            file_part to retry uploading that specific chunk.
        file_part (``int``, *optional*):
            In case a file part expired, pass the file_id and the
            file_part to retry uploading that specific chunk.
        progress (``callable``, *optional*):
            Callback called as *(client, current, total, *args)* to
            report upload progress.
        progress_args (``tuple``, *optional*):
            Extra custom arguments for the progress callback.

    Returns:
        On success, the uploaded file as an InputFile (or InputFileBig
        for files over 10 MiB); ``None`` when only re-sending a single
        missing part.

    Raises:
        :class:`RPCError <pyrogram.RPCError>` in case of a Telegram RPC error.
    """
    part_size = 512 * 1024  # Telegram's upload chunk size
    file_size = os.path.getsize(path)
    if file_size == 0:
        raise ValueError("File size equals to 0 B")
    if file_size > 1500 * 1024 * 1024:
        raise ValueError("Telegram doesn't support uploading files bigger than 1500 MiB")
    file_total_parts = int(math.ceil(file_size / part_size))
    # Files over 10 MiB use SaveBigFilePart and skip the md5 checksum.
    is_big = True if file_size > 10 * 1024 * 1024 else False
    is_missing_part = True if file_id is not None else False
    file_id = file_id or self.rnd_id()
    md5_sum = md5() if not is_big and not is_missing_part else None
    # Dedicated media session for the upload.
    session = Session(self, self.dc_id, self.auth_key, is_media=True)
    session.start()
    try:
        with open(path, "rb") as f:
            # Resume at the requested part (0 for a fresh upload).
            f.seek(part_size * file_part)
            while True:
                chunk = f.read(part_size)
                if not chunk:
                    # EOF: finalize the md5 checksum as a hex string.
                    if not is_big:
                        md5_sum = "".join([hex(i)[2:].zfill(2) for i in md5_sum.digest()])
                    break
                # Retry each chunk up to 3 times before giving up.
                for _ in range(3):
                    if is_big:
                        rpc = functions.upload.SaveBigFilePart(file_id=file_id, file_part=file_part, file_total_parts=file_total_parts, bytes=chunk)
                    else:
                        rpc = functions.upload.SaveFilePart(file_id=file_id, file_part=file_part, bytes=chunk)
                    if session.send(rpc):
                        break
                else:
                    raise AssertionError("Telegram didn't accept chunk #{} of {}".format(file_part, path))
                if is_missing_part:
                    # Only the single expired part had to be re-sent.
                    return
                if not is_big:
                    md5_sum.update(chunk)
                file_part += 1
                if progress:
                    progress(self, min(file_part * part_size, file_size), file_size, *progress_args)
    except Client.StopTransmission:
        raise
    except Exception as e:
        # NOTE(review): other failures are logged and swallowed, so the
        # caller gets None instead of an exception — confirm intended.
        log.error(e, exc_info=True)
    else:
        if is_big:
            return types.InputFileBig(id=file_id, parts=file_total_parts, name=os.path.basename(path), )
        else:
            return types.InputFile(id=file_id, parts=file_total_parts, name=os.path.basename(path), md5_checksum=md5_sum)
    finally:
        session.stop()
def unlock(self):
    """Close this object's database session, failing loudly if absent."""
    if not hasattr(self, 'session'):
        raise RuntimeError('Error detected! The session that you want to close does not exist any more!')
    # Log first, while the session attributes are still available.
    logger.debug("Closed database session of '%s'" % self._database)
    self.session.close()
    del self.session
def echo_info(conn):
    """Print the information detected on *conn* to the console."""
    lines = [
        "General information:",
        "  Hostname: {}".format(conn.hostname),
        "  HW Family: {}".format(conn.family),
        "  HW Platform: {}".format(conn.platform),
        "  SW Type: {}".format(conn.os_type),
        "  SW Version: {}".format(conn.os_version),
        "  Prompt: {}".format(conn.prompt),
        "  Console connection: {}".format(conn.is_console),
        "\nUDI:",
        "  PID: {}".format(conn.pid),
        "  Description: {}".format(conn.description),
        "  Name: {}".format(conn.name),
        "  SN: {}".format(conn.sn),
        "  VID: {}".format(conn.vid),
    ]
    for line in lines:
        click.echo(line)
def get_metrics(self, timestamp):
    """Yield a Metric for each registered view's ViewData.

    :type timestamp: :class:`datetime.datetime`
    :param timestamp: The timestamp to use for metric conversions,
        usually the current time.

    :rtype: Iterator[:class:`opencensus.metrics.export.metric.Metric`]
    """
    for view_data_list in self._measure_to_view_data_list_map.values():
        for view_data in view_data_list:
            converted = metric_utils.view_data_to_metric(view_data, timestamp)
            # Skip views that could not be converted.
            if converted is not None:
                yield converted
def prt_goids(self, prt):
    """Write every GO ID in the plot, with its color, to stream *prt*."""
    fmt = self.gosubdag.prt_attr['fmta']
    color_of = self.pydotnodego.go2color.get
    # Sort namedtuples by namespace, then depth, then alt-ID flag.
    ordered = sorted(self.gosubdag.go2nt.values(),
                     key=lambda nt: (nt.NS, nt.depth, nt.alt))
    for nt in ordered:
        prt.write("{COLOR:7} {GO}\n".format(
            COLOR=color_of(nt.GO, ""),
            GO=fmt.format(**nt._asdict())))
def gzip_files(self):
    """Return the files explicitly marked for compression by this link."""
    return [name for name, flags in self.file_dict.items()
            if flags & FileFlags.gz_mask]
def df(self, version=None, tags=None, ext=None, **kwargs):
    """Load an instance of this dataset into a dataframe.

    Parameters
    ----------
    version : str, optional
        The version of the instance of this dataset.
    tags : list of str, optional
        The tags associated with the desired instance of this dataset.
    ext : str, optional
        The file extension to use.  NOTE(review): this argument is
        immediately overwritten by the discovered extension (as in the
        original implementation) — confirm whether it should be honored.
    **kwargs : extra keyword arguments, optional
        Forwarded to the deserialization method of the
        SerializationFormat matching the extension.

    Returns
    -------
    pandas.DataFrame
        A dataframe containing the desired instance of this dataset.
    """
    ext = self._find_extension(version=version, tags=tags)
    if ext is None:
        attribs = "".join([
            "version={} and ".format(version) if version else "",
            "tags={}".format(tags) if tags else "",
        ])
        raise MissingDatasetError(
            "No dataset with {} in local store!".format(attribs))
    fpath = self.fpath(version=version, tags=tags, ext=ext)
    return SerializationFormat.by_name(ext).deserialize(fpath, **kwargs)
def on_message(self, ws, message):
    """Websocket on_message event handler.

    Decodes *message* as JSON and queues it in ``self._inbox`` as an
    RTMMessage; records an error on decode failure.
    """
    try:
        payload = json.loads(message)
    except Exception:
        self._set_error(message, "decode message failed")
        return
    self._inbox.put(RTMMessage(payload))
def close(self):
    """Connection(s) cleanup: terminate the shell and drain its output."""
    # TODO: DRY with workerLaunch.py
    scoop.logger.debug('Closing broker on host {0}.'.format(self.hostname))
    # Terminate the subprocess; it may already be gone.
    try:
        self.shell.terminate()
    except OSError:
        pass
    # Forward the child's captured stdout and stderr to the console.
    for child_stream, console in ((self.shell.stdout, sys.stdout),
                                  (self.shell.stderr, sys.stderr)):
        console.write(child_stream.read().decode("utf-8"))
        console.flush()
def partition_app_list(app_list, n):
    """Partition *app_list* into *n* buckets of roughly equal row counts.

    :param app_list: A list of apps with models.
    :param n: Number of buckets to divide into.
    :return: n lists of apps; each app costs ``len(models) + 1`` rows
        (the +1 accounts for the app heading).
    """
    total_rows = sum(1 + len(app['models']) for app in app_list)
    rows_per_bucket = total_rows / n
    buckets = [[] for _ in range(n)]
    bucket = 0
    used = 0
    for app in app_list:
        cost = len(app['models']) + 1
        # Start the next bucket when more than half of this app would
        # overflow the current one — but never move past the last bucket.
        if bucket < n - 1 and used + cost / 2.0 > rows_per_bucket:
            bucket += 1
            used = 0
        buckets[bucket].append(app)
        used += cost
    return buckets
def _find_snapshot ( self , name ) :
"""Find snapshot on remote by name or regular expression""" | remote_snapshots = self . _get_snapshots ( self . client )
for remote in reversed ( remote_snapshots ) :
if remote [ "Name" ] == name or re . match ( name , remote [ "Name" ] ) :
return remote
return None |
def urljoin(base_url, url, allow_fragments=True):
    '''Join URLs like ``urllib.parse.urljoin`` but allow scheme-relative URL.'''
    if url.startswith('//') and len(url) > 2:
        # Borrow the base URL's scheme for a scheme-relative target.
        scheme = base_url.partition(':')[0]
        if scheme:
            url = '{0}:{1}'.format(scheme, url)
    return urllib.parse.urljoin(base_url, url, allow_fragments=allow_fragments)
def close_stream(self):
    """Stop and close the audio stream, then tear down PyAudio."""
    self.keep_listening = False
    stream = self.stream
    stream.stop_stream()
    stream.close()
    self.pa.terminate()
def _bfgs_direction(s, y, x, hessinv_estimate=None):
    r"""Compute ``Hn^-1(x)`` for the L-BFGS method.

    Parameters
    ----------
    s : sequence of `LinearSpaceElement`
        The ``s`` coefficients in the BFGS update, see Notes.
    y : sequence of `LinearSpaceElement`
        The ``y`` coefficients in the BFGS update, see Notes.
    x : `LinearSpaceElement`
        Point in which to evaluate the product.
    hessinv_estimate : `Operator`, optional
        Initial estimate of the hessian ``H0^-1``.

    Returns
    -------
    r : ``x.space`` element
        The result of ``Hn^-1(x)``.

    Notes
    -----
    :math:`H_n^{-1}` is defined recursively as

    .. math::
        H_{n+1}^{-1} =
        \left(I - \frac{s_n y_n^T}{y_n^T s_n}\right)
        H_{n}^{-1}
        \left(I - \frac{y_n s_n^T}{y_n^T s_n}\right) +
        \frac{s_n s_n^T}{y_n^T \, s_n}

    with :math:`H_0^{-1}` given by ``hessinv_estimate``.
    """
    assert len(s) == len(y)
    r = x.copy()
    alphas = np.zeros(len(s))
    rhos = np.zeros(len(s))
    # First pass of the standard two-loop recursion: fold in stored
    # curvature pairs from newest to oldest.
    for i in reversed(range(len(s))):
        rhos[i] = 1.0 / y[i].inner(s[i])
        alphas[i] = rhos[i] * (s[i].inner(r))
        r.lincomb(1, r, -alphas[i], y[i])
    # Apply the initial inverse-Hessian estimate, if provided.
    if hessinv_estimate is not None:
        r = hessinv_estimate(r)
    # Second pass: oldest to newest.
    for i in range(len(s)):
        beta = rhos[i] * (y[i].inner(r))
        r.lincomb(1, r, alphas[i] - beta, s[i])
    return r
def normalize(self):
    """Scale this series' values in place so the maximum becomes 1.

    Each value is divided by ``self.max()``.  When the maximum is falsy
    (zero, or an empty series) the values are left untouched.  Note the
    result only lies in [0, 1] when all values are non-negative.

    (Docstring fixed: the previous one claimed a new series was
    returned, but this method mutates ``self.values`` and returns None.)

    :return: ``None``
    """
    maximum = self.max()
    if maximum:
        self.values = [value / maximum for value in self.values]
def send_transaction(self, to: Address, startgas: int, value: int = 0, data: bytes = b'', ) -> bytes:
    """Locally sign and broadcast a transaction; return its hash.

    This method uses the `privkey` provided in the constructor to
    locally sign the transaction.  This requires an extended server
    implementation that accepts the variables v, r, and s.
    """
    if to == to_canonical_address(constants.NULL_ADDRESS):
        warnings.warn('For contract creation the empty string must be used.')
    # Serialize concurrent senders so each transaction gets a unique,
    # monotonically increasing nonce.
    with self._nonce_lock:
        nonce = self._available_nonce
        gas_price = self.gas_price()
        transaction = {'data': data, 'gas': startgas, 'nonce': nonce, 'value': value, 'gasPrice': gas_price, }
        node_gas_price = self.web3.eth.gasPrice
        log.debug('Calculated gas price for transaction', node=pex(self.address), calculated_gas_price=gas_price, node_gas_price=node_gas_price, )
        # add the to address if not deploying a contract
        if to != b'':
            transaction['to'] = to_checksum_address(to)
        signed_txn = self.web3.eth.account.signTransaction(transaction, self.privkey)
        log_details = {'node': pex(self.address), 'nonce': transaction['nonce'], 'gasLimit': transaction['gas'], 'gasPrice': transaction['gasPrice'], }
        log.debug('send_raw_transaction called', **log_details)
        tx_hash = self.web3.eth.sendRawTransaction(signed_txn.rawTransaction)
        # Only bump the local nonce once the node accepted the
        # transaction; still inside the lock to stay consistent.
        self._available_nonce += 1
        log.debug('send_raw_transaction returned', tx_hash=encode_hex(tx_hash), **log_details)
        return tx_hash
def _cfgs_to_read(self):
    """Return the ordered list of config files used to build the final config.

    Later files extend/overwrite values from earlier ones, so the last
    file read always wins.
    """
    cfg = Config.DEFAULT_CONFIG_FILE_NAME
    filenames = [
        self.default_config_file,
        cfg,  # conf_values in current directory
        os.path.join(os.path.expanduser('~' + os.path.sep), cfg),  # user dir
        '.pyemma.cfg',
    ]
    # Append any user-defined *.cfg files from the configured directory.
    if self.cfg_dir:
        from glob import glob
        filenames.extend(glob(self.cfg_dir + os.path.sep + "*.cfg"))
    return filenames
def _set_i2c_speed ( self , i2c_speed ) :
"""Set I2C speed to one of ' 400kHz ' , ' 100kHz ' , 50kHz ' , ' 5kHz '""" | lower_bits_mapping = { '400kHz' : 3 , '100kHz' : 2 , '50kHz' : 1 , '5kHz' : 0 , }
if i2c_speed not in lower_bits_mapping :
raise ValueError ( 'Invalid i2c_speed' )
speed_byte = 0b01100000 | lower_bits_mapping [ i2c_speed ]
self . device . write ( bytearray ( [ speed_byte ] ) )
response = self . device . read ( 1 )
if response != b"\x01" :
raise Exception ( "Changing I2C speed failed. Received: {}" . format ( repr ( response ) ) ) |
def db(context, data):
    """Insert or update *data* as a row in the specified db table.

    Defaults the table name to the crawler's name when not configured.
    """
    params = context.params
    params["table"] = params.get("table", context.crawler.name)
    _recursive_upsert(context, params, data)
def apply_migrations(engine, connection, path):
    """Apply all not-yet-applied migrations in chronological order."""
    applied = get_migrations_applied(engine, connection)
    for migration_file in get_migrations_files(path):
        # The migration's name is its containing directory's basename.
        name = os.path.basename(os.path.dirname(migration_file))
        # Skip migrations that were already applied.
        if is_applied(applied, name):
            continue
        source = get_migration_source(migration_file)
        run_migration(connection, source, engine)
        # Record the migration so it is not run again.
        save_migration(connection, name)
        print(' -> Migration `%s` applied' % (name))
    print(' * Migrations applied')
    return True
def dark(cls):
    "Make the current foreground color dark."
    attributes = cls._get_text_attributes()
    # Clear the intensity bit to dim the foreground.
    cls._set_text_attributes(attributes & ~win32.FOREGROUND_INTENSITY)
def check_sig(package):
    """Return True when the rpm *package* carries a signature.

    We don't care whether the signature is valid at the moment.
    Shamelessly stolen from Seth Vidal,
    http://yum.baseurl.org/download/misc/checksig.py
    """
    ts = rpm.TransactionSet('/')
    ts.setVSFlags(0)  # don't verify, just read header signature info
    hdr = return_hdr(ts, package)
    _sigerror, (_sigtype, _sigdate, sigid) = get_sig_info(hdr)
    keyid = 'None' if sigid == 'None' else sigid[-8:]
    return keyid != 'None'
def remove_send_last_message(self, connection):
    """Remove a send_last_message function previously registered
    with the Dispatcher.

    Args:
        connection (str): A locally unique identifier provided
            by the receiver of messages.
    """
    if connection not in self._send_last_message:
        LOGGER.warning("Attempted to remove send_last_message "
                       "function for connection %s, but no "
                       "send_last_message function was registered",
                       connection)
        return
    del self._send_last_message[connection]
    LOGGER.debug("Removed send_last_message function "
                 "for connection %s", connection)
def load_layer_from_registry(layer_path):
    """Load a layer from the registry if it is already there.

    If the layer is not loaded yet, the QgsMapLayer is created on the
    fly from *layer_path*.

    :param layer_path: Layer source path.
    :type layer_path: str

    :return: Vector layer
    :rtype: QgsVectorLayer

    .. versionadded: 4.3.0
    """
    # Reload the layer in case the path has no provider information.
    the_layer = load_layer(layer_path)[0]
    target_uri = full_layer_uri(the_layer)
    for registered in QgsProject.instance().mapLayers().values():
        if full_layer_uri(registered) == target_uri:
            monkey_patch_keywords(registered)
            return registered
    return the_layer
def schedule_host_check(self, host, check_time):
    """Schedule a check on a host.

    Format of the line that triggers function call::
        SCHEDULE_HOST_CHECK;<host_name>;<check_time>

    :param host: host to check
    :type host: alignak.object.host.Host
    :param check_time: time to check
    :return: None
    """
    daemon = self.daemon
    host.schedule(daemon.hosts, daemon.services, daemon.timeperiods,
                  daemon.macromodulations, daemon.checkmodulations,
                  daemon.checks, force=False, force_time=check_time)
    # Broadcast the host's updated status.
    self.send_an_element(host.get_update_status_brok())
def get_opt_tags(self, section, option, tags):
    """Supplement to ``ConfigParser.get()`` that also searches tag sub-sections.

    Looks for *option* in ``[section]``; if absent, tries every
    ``[section-tag...]`` combination built from permutations of *tags*.
    Raises a ``ConfigParser.Error`` when no value (or more than one) is
    found.

    Parameters
    ----------
    self : ConfigParser object
        The ConfigParser instance (automatically passed when this is
        appended to the ConfigParser class).
    section : string
        The section of the ConfigParser object to read.
    option : string
        The ConfigParser option to look for.
    tags : list of strings
        Names of subsections to look in if not found in [section].

    Returns
    -------
    string
        The value of the option being searched for.
    """
    # Need lowercase tag names; also exclude tag=None entries.
    if tags:
        tags = [tag.lower() for tag in tags if tag is not None]
    try:
        return self.get(section, option)
    except ConfigParser.Error:
        err_string = "No option '%s' in section [%s] " % (option, section)
        if not tags:
            raise ConfigParser.Error(err_string + ".")
        # Every "-"-joined permutation of the tags, shortest first.
        sub_names = []
        for size in range(1, len(tags) + 1):
            for perm in itertools.permutations(tags, size):
                sub_names.append('-'.join(perm))
        found_values = []
        found_sections = []
        for sub in sub_names:
            full_name = '%s-%s' % (section, sub)
            if self.has_section(full_name) and self.has_option(full_name, option):
                found_sections.append(full_name)
                found_values.append(self.get(full_name, option))
        if not found_values:
            all_names = ["%s-%s" % (section, sb) for sb in sub_names]
            raise ConfigParser.Error(
                err_string + "or in sections [%s]." % ("] [".join(all_names)))
        if len(found_values) > 1:
            raise ConfigParser.Error(
                err_string + "and multiple entries found in sections [%s]."
                % ("] [".join(found_sections)))
        return found_values[0]
def populate_times(self):
    """Populate time attributes from measurement meta data, when present.

    For each of the ``stop_time``, ``creation_time`` and ``start_time``
    timestamps found in ``self.meta_data``, set the attribute of the same
    name to a timezone-aware (UTC) datetime.
    """
    for key in ("stop_time", "creation_time", "start_time"):
        timestamp = self.meta_data.get(key)
        if timestamp:
            # Timestamps are epoch seconds; make the result explicitly UTC.
            naive = datetime.utcfromtimestamp(timestamp)
            setattr(self, key, naive.replace(tzinfo=tzutc()))
def add_edge(self, start, end, **kwargs):
    """Add an edge between two nodes.

    The nodes will be automatically added if they are not present in the
    network.

    Parameters
    ----------
    start: tuple
        Both the start and end nodes should specify the time slice as
        (node_name, time_slice). Here, node_name can be any hashable
        python object while the time_slice is an integer value,
        which denotes the time slice that the node belongs to.
    end: tuple
        Both the start and end nodes should specify the time slice as
        (node_name, time_slice). Here, node_name can be any hashable
        python object while the time_slice is an integer value,
        which denotes the time slice that the node belongs to.

    Examples
    --------
    >>> from pgmpy.models import DynamicBayesianNetwork as DBN
    >>> model = DBN()
    >>> model.add_nodes_from(['D', 'I'])
    >>> model.add_edge(('D', 0), ('I', 0))
    >>> sorted(model.edges())
    [(('D', 0), ('I', 0)), (('D', 1), ('I', 1))]
    """
    try:
        # Validate that both endpoints are (node, int time_slice) pairs.
        if len(start) != 2 or len(end) != 2:
            raise ValueError('Nodes must be of type (node, time_slice).')
        elif not isinstance(start[1], int) or not isinstance(end[1], int):
            raise ValueError('Nodes must be of type (node, time_slice).')
        elif start[1] == end[1]:
            # Intra-slice edge: normalise both endpoints to slice 0.
            start = (start[0], 0)
            end = (end[0], 0)
        elif start[1] == end[1] - 1:
            # Inter-slice edge across consecutive slices: normalise to 0 -> 1.
            start = (start[0], 0)
            end = (end[0], 1)
        elif start[1] > end[1]:
            raise NotImplementedError('Edges in backward direction are not allowed.')
        elif start[1] != end[1]:
            # Forward edges spanning more than one slice are rejected.
            raise ValueError("Edges over multiple time slices is not currently supported")
    except TypeError:
        # Raised by len()/indexing when the endpoints are not tuples.
        raise ValueError('Nodes must be of type (node, time_slice).')

    if start == end:
        raise ValueError('Self Loops are not allowed')
    elif start in super(DynamicBayesianNetwork, self).nodes() and end in super(DynamicBayesianNetwork, self).nodes() and nx.has_path(self, end, start):
        # Adding this edge would create a directed cycle.
        raise ValueError('Loops are not allowed. Adding the edge from ({start} --> {end}) forms a loop.'.format(start=str(start), end=str(end)))

    super(DynamicBayesianNetwork, self).add_edge(start, end, **kwargs)

    if start[1] == end[1]:
        # Mirror intra-slice edges into the other time slice (2-TBN invariant).
        super(DynamicBayesianNetwork, self).add_edge((start[0], 1 - start[1]), (end[0], 1 - end[1]))
    else:
        # For inter-slice edges only ensure the mirrored end node exists.
        super(DynamicBayesianNetwork, self).add_node((end[0], 1 - end[1]))
def clean(self):
    """Always raise the default error message, because we don't
    care what they entered here.
    """
    params = {'username': self.username_field.verbose_name}
    raise forms.ValidationError(
        self.error_messages['invalid_login'],
        code='invalid_login',
        params=params,
    )
def as_dict(self):
    """Pre-serialisation of the meta data.

    Returns a plain dict with the name, time, and stringified errors and
    warnings, suitable for JSON encoding.
    """
    return {
        "name": self.name,
        "time": self.time,
        # error pre-serialisation
        "errors": [str(err) for err in self.errors],
        # warning pre-serialisation
        "warnings": [str(warn) for warn in self.warnings],
    }
def update(*args, **kwargs):
    '''Update installed plugin package(s).

    Each plugin package must have a directory (**NOT** a link) containing a
    ``properties.yml`` file with a ``package_name`` value in the following
    directory:

        <conda prefix>/share/microdrop/plugins/available/

    Parameters
    ----------
    *args
        Extra arguments to pass to Conda ``install`` command.
        See :func:`install`.
    package_name : str or list, optional
        Name(s) of MicroDrop plugin Conda package(s) to update.
        By default, all installed packages are updated.
    **kwargs
        See :func:`install`.

    Returns
    -------
    dict
        Conda installation log object (from JSON ``conda install`` output).

    Notes
    -----
    Only actual plugin directories are considered when updating (i.e., **NOT**
    directory links).  This permits, for example, linking of a plugin into the
    ``available`` plugins directory during development without risking
    overwriting during an update.

    Raises
    ------
    RuntimeError
        If one or more installed plugin packages cannot be updated.
        This can happen, for example, if the plugin package is not available
        in any of the specified Conda channels.

    See also
    --------
    :func:`installed_plugins`
    '''
    package_name = kwargs.pop('package_name', None)
    # Only consider **installed** plugins (see `installed_plugins()` docstring).
    installed_plugins_ = installed_plugins(only_conda=True)
    if installed_plugins_:
        plugin_packages = [plugin_i['package_name']
                           for plugin_i in installed_plugins_]
        if package_name is None:
            package_name = plugin_packages
        elif isinstance(package_name, getattr(types, 'StringTypes', str)):
            # Normalise a single package name to a list.  `types.StringTypes`
            # exists only on Python 2; fall back to `str` on Python 3.
            package_name = [package_name]
        logger.info('Installing any available updates for plugins: %s',
                    ','.join('`{}`'.format(package_name_i)
                             for package_name_i in package_name))
        # Attempt to install plugin packages.
        try:
            install_log = install(package_name, *args, **kwargs)
        except RuntimeError as exception:
            # BUG FIX: the original `except RuntimeError, exception` spelling
            # is a SyntaxError on Python 3; `as` works on Python 2.6+ and 3.
            if 'CondaHTTPError' in str(exception):
                raise IOError('Error accessing update server.')
            else:
                raise
        if 'actions' in install_log:
            logger.debug('Updated plugin(s): ```%s```', install_log['actions'])
            return install_log
        else:
            return {}
def _get_id_from_username(self, username):
    """Look up a username's id.

    :param string username: Username to lookup
    :returns: one-element list containing the id that matches username.
    :raises SoftLayerError: if zero or more than one user matches.
    """
    matches = self.list_users(
        "mask[id, username]",
        {'users': {'username': utils.query_filter(username)}},
    )
    if not matches:
        raise exceptions.SoftLayerError("Unable to find user id for %s" % username)
    if len(matches) > 1:
        raise exceptions.SoftLayerError("Multiple users found with the name: %s" % username)
    return [matches[0]['id']]
def list_to_csv_response(data, title='report', header=None, widths=None):
    """Make 2D list into a csv response for download data.

    The optional ``header`` row is written first when given; each cell is
    encoded with the response charset before writing.
    """
    response = HttpResponse(content_type="text/csv; charset=UTF-8")
    writer = csv.writer(response)
    rows = chain([header], data) if header else data
    for row in rows:
        writer.writerow([force_text(cell).encode(response.charset) for cell in row])
    return response
def __sort_up(self):
    """Sort the updatable objects in ascending order, but only when the
    dirty flag says a re-sort is needed."""
    if not self.__do_need_sort_up:
        return
    self.__up_objects.sort(key=cmp_to_key(self.__up_cmp))
    self.__do_need_sort_up = False
def create(cls, api, default_name=None, description=None):
    """http://docs.fiesta.cc/list-management-api.html#creating-a-group

    200 character max on the description.
    """
    payload = {}
    if default_name:
        payload['default_group_name'] = default_name
    if description:
        payload['description'] = description
    if api.domain:
        payload['domain'] = api.domain
    response_data = api.request('group', data=payload)
    # The server may normalise the name, so read it back from the response.
    group = cls(api, response_data['group_id'])
    group.default_name = response_data['default_group_name']
    return group
def list_combiner(value, mutator, *args, **kwargs):
    """Expects the output of the source to be a list to which
    the result of each mutator is appended.

    Returns the (mutated) list itself.
    """
    result = mutator(*args, **kwargs)
    value.append(result)
    return value
def _validate(item):
    """Validate (instance, prop name) tuple.

    Raises ValueError when the item is not a 2-tuple of a linkable
    instance and the string name of one of its properties.
    """
    if not (isinstance(item, tuple) and len(item) == 2):
        raise ValueError('Linked items must be instance/prop-name tuple')
    instance, prop = item
    if not isinstance(instance, tuple(LINK_OBSERVERS)):
        allowed = ', '.join([link_cls.__name__ for link_cls in LINK_OBSERVERS])
        raise ValueError('Only {} instances may be linked'.format(allowed))
    if not isinstance(prop, string_types):
        raise ValueError('Properties must be specified as string names')
    if not hasattr(instance, prop):
        raise ValueError('Invalid property {} for {} instance'.format(
            prop, instance.__class__.__name__))
def _filter_queryset(self, perms, queryset):
    """Filter object objects by permissions of user in request.

    Falls back to an anonymous user when there is no request.
    """
    if self.request:
        user = self.request.user
    else:
        user = AnonymousUser()
    return get_objects_for_user(user, perms, queryset)
def text_ratio(self):
    """Return a measure of the sequences' word similarity (float in [0, 1]).

    Each word has weight equal to its length for this measure.

    >>> m = WordMatcher(a=['abcdef', '12'], b=['abcdef', '34'])  # 3/4 of the text is the same
    >>> '%.3f' % m.ratio()       # normal ratio fails
    '0.500'
    >>> '%.3f' % m.text_ratio()  # text ratio is accurate
    '0.750'
    """
    total_text = self._text_length(self.a) + self._text_length(self.b)
    return _calculate_ratio(self.match_length(), total_text)
def fn2lun(fname):
    """Internal undocumented command for mapping name of open file to
    its FORTRAN (F2C) logical unit.

    https://naif.jpl.nasa.gov/pub/naif/toolkit_docs/FORTRAN/spicelib/fn2lun.html

    :param fname: name of the file to be mapped to its logical unit.
    :type fname: str
    :return: the FORTRAN (F2C) logical unit associated with the filename.
    :rtype: int
    """
    fname_p = stypes.stringToCharP(fname)
    unit_out = ctypes.c_int()
    # Hidden FORTRAN string-length argument; +1 presumably covers the
    # terminating NUL -- TODO confirm against the SPICE F2C calling convention.
    fname_len = ctypes.c_int(len(fname) + 1)
    libspice.fn2lun_(fname_p, ctypes.byref(unit_out), fname_len)
    return unit_out.value
def _post(self, data, file_to_upload=None):
    """Make the POST request.

    When ``file_to_upload`` is given, the request is sent as a multipart
    upload; otherwise a plain form-encoded POST is issued.  The decoded
    JSON-RPC ``result`` is returned; a BrightcoveError is raised when the
    response carries an ``error`` member.
    """
    # pylint: disable=E1101
    params = {"JSONRPC": simplejson.dumps(data)}
    req = None
    if file_to_upload:
        req = http_core.HttpRequest(self.write_url)
        req.method = 'POST'
        req.add_body_part("JSONRPC", simplejson.dumps(data), 'text/plain')
        # BUG FIX: the original used the Python-2-only `file()` builtin and
        # never closed the handle (resource leak); use open() + finally.
        upload = open(file_to_upload, "rb")
        try:
            req.add_body_part("filePath", upload, 'application/octet-stream')
            req.end_of_parts()
            content_type = "multipart/form-data; boundary=%s" % http_core.MIME_BOUNDARY
            req.headers['Content-Type'] = content_type
            req.headers['User-Agent'] = config.USER_AGENT
            req = http_core.ProxiedHttpClient().request(req)
        finally:
            upload.close()
    else:
        msg = urllib.urlencode({'json': params['JSONRPC']})
        req = urllib2.urlopen(self.write_url, msg)
    if req:
        result = simplejson.loads(req.read())
        if 'error' in result and result['error']:
            exceptions.BrightcoveError.raise_exception(result['error'])
        return result['result']
def nearest_neighbors(coordinates_a, coordinates_b, periodic, r=None, n=None):
    '''Nearest neighbor search between two arrays of coordinates. Notice that
    you can control the result by selecting neighbors either by radius *r* or
    by number *n*. The algorithm uses a periodic variant of KDTree to reach a
    Nlog(N) time complexity.

    :param np.ndarray coordinates_a: Either an array of coordinates of shape (N, 3)
                                     or a single point of shape (3,)
    :param np.ndarray coordinates_b: Same as coordinates_a
    :param np.ndarray periodic: Either a matrix of box vectors (3, 3) or an
                                array of box lengths of shape (3,). Only
                                orthogonal boxes are supported.
    :param float r: Radius of neighbor search
    :param int n: Number of nearest neighbors to return
    '''
    if r is None and n is None:
        raise ValueError('You should pass either the parameter n or r')
    coordinates_a = _check_coordinates(coordinates_a)
    coordinates_b = _check_coordinates(coordinates_b)
    periodic = _check_periodic(periodic)
    # The tree is built on *b*; queries are the points of *a*.
    kdtree = PeriodicCKDTree(periodic, coordinates_b)
    if r is not None:
        neigh = kdtree.query_ball_point(coordinates_a, r)
        if isnested(neigh):
            return (neigh, [coordinates_b.take(nb, axis=0) for nb in neigh])
        else:
            return (neigh, coordinates_b.take(neigh, axis=0)) if len(neigh) != 0 else ([], [])
    if n is not None:
        # BUG FIX: previously this queried `coordinates_b` against its own
        # tree, ignoring `coordinates_a` entirely -- inconsistent with both
        # the radius branch above and the documented semantics.
        neigh = kdtree.query(coordinates_a, n)
        return neigh if len(neigh) != 0 else ([], [])
def is_task_running(self, task, connection_failure_control=None):
    """Check if a task is running according to: TASK_PENDING_STATES
    ['New', 'Starting', 'Pending', 'Running', 'Suspended', 'Stopping']

    Args:
        task (dict): OneView Task resource.
        connection_failure_control (dict):
            A dictionary instance that contains last_success for error
            tolerance control.

    Examples:
        >>> connection_failure_control = dict(last_success=int(time.time()))
        >>> while self.is_task_running(task, connection_failure_control):
        >>>     pass

    Returns:
        True when in TASK_PENDING_STATES; False when not.
    """
    if 'uri' in task:
        try:
            # Refresh the task resource from the server.
            task = self.get(task)
            if connection_failure_control:
                # Updates last success
                connection_failure_control['last_success'] = self.get_current_seconds()
            if 'taskState' in task and task['taskState'] in TASK_PENDING_STATES:
                return True
        except Exception as error:
            logger.error('; '.join(str(e) for e in error.args) + ' when waiting for the task: ' + str(task))
            if not connection_failure_control:
                # No tolerance configured: propagate immediately.
                raise error
            if hasattr(error, 'errno') and error.errno in self.CONNECTION_FAILURE_ERROR_NUMBERS:
                # Known transient network error: tolerate it until the
                # configured timeout since the last successful poll elapses.
                last_success = connection_failure_control['last_success']
                if last_success + self.CONNECTION_FAILURE_TIMEOUT < self.get_current_seconds():
                    # Timeout reached
                    raise error
                else:
                    # Return task is running when network instability occurs
                    return True
            else:
                raise error
    return False
async def issuer_revoke_credential(wallet_handle: int,
                                   blob_storage_reader_handle: int,
                                   rev_reg_id: str,
                                   cred_revoc_id: str) -> str:
    """Revoke a credential identified by a cred_revoc_id (returned by issuer_create_credential).

    The corresponding credential definition and revocation registry must be already
    created and stored into the wallet.

    This call returns revoc registry delta as json file intended to be shared as REVOC_REG_ENTRY transaction.
    Note that it is possible to accumulate deltas to reduce ledger load.

    :param wallet_handle: wallet handler (created by open_wallet).
    :param blob_storage_reader_handle: pre-configured blob storage reader instance handle that will allow
                                       to read revocation tails
    :param rev_reg_id: id of revocation registry stored in wallet
    :param cred_revoc_id: local id for revocation info
    :return: Revocation registry delta json with a revoked credential.
    """
    logger = logging.getLogger(__name__)
    logger.debug("issuer_revoke_credential: >>> wallet_handle: %r, blob_storage_reader_handle: %r, rev_reg_id: %r, "
                 "cred_revoc_id: %r",
                 wallet_handle, blob_storage_reader_handle, rev_reg_id, cred_revoc_id)

    # The C callback is created once and cached on the function object.
    if not hasattr(issuer_revoke_credential, "cb"):
        logger.debug("issuer_revoke_credential: Creating callback")
        issuer_revoke_credential.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))

    # Marshal Python arguments into C types for the FFI call.
    c_wallet_handle = c_int32(wallet_handle)
    c_blob_storage_reader_handle = c_int32(blob_storage_reader_handle)
    c_rev_reg_id = c_char_p(rev_reg_id.encode('utf-8'))
    c_cred_revoc_id = c_char_p(cred_revoc_id.encode('utf-8'))

    revoc_reg_delta_json = await do_call('indy_issuer_revoke_credential',
                                         c_wallet_handle,
                                         c_blob_storage_reader_handle,
                                         c_rev_reg_id,
                                         c_cred_revoc_id,
                                         issuer_revoke_credential.cb)

    res = revoc_reg_delta_json.decode()
    logger.debug("issuer_revoke_credential: <<< res: %r", res)
    return res
def send(self, command, payload):
    """Send a WorkRequest containing command and payload to the queue specified in config.

    :param command: str: name of the command we want run by WorkQueueProcessor
    :param payload: object: pickable data to be used when running the command
    """
    request = WorkRequest(command, payload)
    logging.info("Sending {} message to queue {}.".format(request.command, self.queue_name))
    # setting protocol to version 2 to be compatible with python2
    self.connection.send_durable_message(self.queue_name, pickle.dumps(request, protocol=2))
    # BUG FIX: the original passed a stray `self.queue_name` argument to a
    # format string with a single placeholder.
    logging.info("Sent {} message.".format(request.command))
def delete(self, name, kind=None):
    """Removes an input from the collection.

    :param `kind`: The kind of input:

        - "ad": Active Directory
        - "monitor": Files and directories
        - "registry": Windows Registry
        - "script": Scripts
        - "splunktcp": TCP, processed
        - "tcp": TCP, unprocessed
        - "udp": UDP
        - "win-event-log-collections": Windows event log
        - "win-perfmon": Performance monitoring
        - "win-wmi-collections": WMI

    :type kind: ``string``
    :param name: The name of the input to remove.
    :type name: ``string``

    :return: The :class:`Inputs` collection.
    """
    # Look the entity up by name alone, or by (name, kind) when disambiguation
    # is needed, then delete its REST path.
    entity = self[name] if kind is None else self[name, kind]
    self.service.delete(entity.path)
    return self
def get_archiver(self, kind):
    """Returns instance of archiver class specific to given kind.

    :param kind: archive kind
    :raises KeyError: if the kind is not one of tar/tbz2/tgz/zip
    """
    kind_to_class = {
        'tar': TarArchiver,
        'tbz2': Tbz2Archiver,
        'tgz': TgzArchiver,
        'zip': ZipArchiver,
    }
    archiver_class = kind_to_class[kind]
    return archiver_class()
def updateBar(self, bar):
    """Update the K-line (candlestick) series with a new bar.

    Shifts every stored series one slot left (dropping the oldest entry)
    and writes the new bar's OHLCV values into the last slot.  The manager
    is flagged as initialised once ``size`` bars have been seen.
    """
    self.count += 1
    if not self.inited and self.count >= self.size:
        self.inited = True
    for name in ('openArray', 'highArray', 'lowArray', 'closeArray', 'volumeArray'):
        series = getattr(self, name)
        series[0:self.size - 1] = series[1:self.size]
    self.openArray[-1] = bar.open
    self.highArray[-1] = bar.high
    self.lowArray[-1] = bar.low
    self.closeArray[-1] = bar.close
    self.volumeArray[-1] = bar.volume
def fancy_transpose(data, roll=1):
    """Fancy transpose.

    This method transposes a multidimensional matrix by rolling the order
    of its axes.

    Parameters
    ----------
    data : np.ndarray
        Input data array
    roll : int
        Roll direction and amount. Default (roll=1)

    Returns
    -------
    np.ndarray transposed data

    Notes
    -----
    Adjustment to numpy.transpose

    Examples
    --------
    >>> from modopt.base.np_adjust import fancy_transpose
    >>> x = np.arange(27).reshape(3, 3, 3)
    >>> fancy_transpose(x)[0, 0]
    array([0, 3, 6])
    """
    # Rolling the axis indices gives the cyclic permutation of axes.
    permuted_axes = np.roll(np.arange(data.ndim), roll)
    return np.transpose(data, axes=permuted_axes)
def _int_from_str(string):
    """Convert string into integer.

    Raises:
        ValueError: if string is not a valid numeric literal.
        TypeError: if string is not a string-like object.
        (The original docstring claimed TypeError for invalid literals;
        ``float()`` actually raises ValueError in that case.)
    """
    float_num = float(string)
    int_num = int(float_num)
    if float_num == int_num:
        return int_num
    else:
        # Needed to handle pseudos with fractional charge.
        # BUG FIX: np.rint returns a float; cast so an int is always returned.
        int_num = int(np.rint(float_num))
        logger.warning("Converting float %s to int %s" % (float_num, int_num))
        return int_num
def to_python(self, value):
    """Converts any value to a base.Version field.

    ``None`` and the empty string pass through untouched, as do values
    that are already ``base.Version`` instances.
    """
    if value is None or value == '':
        return value
    if isinstance(value, base.Version):
        return value
    # Coerce loosely or parse strictly depending on field configuration.
    builder = base.Version.coerce if self.coerce else base.Version
    return builder(value, partial=self.partial)
def get_template_names(self):
    """Adds crud_template_name to default template names."""
    template_names = super(CRUDMixin, self).get_template_names()
    if not self.crud_template_name:
        return template_names
    template_names.append(self.crud_template_name)
    return template_names
def filter(self, filter_func):
    """Return a new SampleCollection containing only samples meeting the filter criteria.

    Will pass any kwargs (e.g., field or skip_missing) used when instantiating the current
    class on to the new SampleCollection that is returned.

    Parameters
    ----------
    filter_func : `callable`
        A function that will be evaluated on every object in the collection. The function
        must return a `bool`. If True, the object will be kept. If False, it will be
        removed from the SampleCollection that is returned.

    Returns
    -------
    `onecodex.models.SampleCollection` containing only objects `filter_func` returned
    True on.

    Examples
    --------
    Generate a new collection of Samples that have a specific filename extension:

        new_collection = samples.filter(lambda s: s.filename.endswith('.fastq.gz'))
    """
    if not callable(filter_func):
        raise OneCodexException(
            "Expected callable for filter, got: {}".format(type(filter_func).__name__)
        )
    # Keep only objects for which the predicate returns exactly True.
    kept = [obj for obj in self if filter_func(obj) is True]
    return self.__class__(kept, **self._kwargs)
def run(self, cmd):
    """Similar to profile.Profile.run."""
    import __main__
    # Use __main__'s namespace for both globals and locals, as profile does.
    main_namespace = __main__.__dict__
    return self.runctx(cmd, main_namespace, main_namespace)
def set_value(self, items, complete=False, on_projects=False, on_globals=False,
              projectname=None, base='', dtype=None, **kwargs):
    """Set a value in the configuration

    Parameters
    ----------
    items: dict
        A dictionary whose keys correspond to the item in the configuration
        and whose values are what shall be inserted. %(get_value_note)s
    %(ModelOrganizer.info.common_params)s
    base: str
        A base string that shall be put in front of each key in `values` to
        avoid typing it all the time
    dtype: str
        The name of the data type or a data type to cast the value to
    """
    def identity(val):
        # Default no-op cast when no dtype is requested.
        return val
    config = self.info(complete=complete, on_projects=on_projects,
                       on_globals=on_globals, projectname=projectname,
                       return_dict=True, insert_id=False, **kwargs)
    if isinstance(dtype, six.string_types):
        # Resolve a builtin type by name, e.g. 'int' -> int.
        dtype = getattr(builtins, dtype)
    elif dtype is None:
        dtype = identity
    for key, value in six.iteritems(dict(items)):
        if base:
            # Prefix every key with the common base path.
            key = base + key
        # Descend into the nested config, creating sub-dicts as needed.
        key, sub_config = utils.go_through_dict(key, config,
                                                setdefault=OrderedDict)
        if key in self.paths:
            # Path-like values are normalised to absolute paths.
            if isinstance(value, six.string_types):
                value = osp.abspath(value)
            else:
                value = list(map(osp.abspath, value))
        sub_config[key] = dtype(value)
def com_google_fonts_check_metadata_normal_style(ttFont, font_metadata):
    """METADATA.pb font.style "normal" matches font internals?"""
    from fontbakery.utils import get_name_entry_strings
    from fontbakery.constants import MacStyle

    if font_metadata.style != "normal":
        yield SKIP, "This check only applies to normal fonts."
        # FIXME: declare a common condition called "normal_style"
    else:
        font_familyname = get_name_entry_strings(ttFont, NameID.FONT_FAMILY_NAME)
        font_fullname = get_name_entry_strings(ttFont, NameID.FULL_FONT_NAME)
        if len(font_familyname) == 0 or len(font_fullname) == 0:
            yield SKIP, ("Font lacks familyname and/or"
                         " fullname entries in name table.")
            # FIXME: This is the same SKIP condition as in check/metadata/italic_style
            #        so we definitely need to address them with a common condition!
        else:
            font_familyname = font_familyname[0]
            font_fullname = font_fullname[0]

            # For a "normal" style the macStyle italic bit must be clear and
            # neither name-table entry may end in "Italic".
            if bool(ttFont["head"].macStyle & MacStyle.ITALIC):
                yield FAIL, Message("bad-macstyle",
                                    ("METADATA.pb style has been set to normal"
                                     " but font macStyle is improperly set."))
            elif font_familyname.split("-")[-1].endswith('Italic'):
                yield FAIL, Message("familyname-italic",
                                    ("Font macStyle indicates a non-Italic font, but"
                                     " nameID {} (FONT_FAMILY_NAME: \"{}\") ends with"
                                     " \"Italic\".").format(NameID.FONT_FAMILY_NAME,
                                                            font_familyname))
            elif font_fullname.split("-")[-1].endswith("Italic"):
                yield FAIL, Message("fullfont-italic",
                                    ("Font macStyle indicates a non-Italic font but"
                                     " nameID {} (FULL_FONT_NAME: \"{}\") ends with"
                                     " \"Italic\".").format(NameID.FULL_FONT_NAME,
                                                            font_fullname))
            else:
                yield PASS, ("METADATA.pb font.style \"normal\""
                             " matches font internals.")
def decommission_brokers(self, broker_ids):
    """Decommission a list of brokers trying to keep the replication group
    the brokers belong to balanced.

    :param broker_ids: list of string representing valid broker ids in the cluster
    :raises: InvalidBrokerIdError when the id is invalid.
    """
    affected_groups = set()
    for broker_id in broker_ids:
        try:
            broker = self.cluster_topology.brokers[broker_id]
        except KeyError:
            self.log.error("Invalid broker id %s.", broker_id)
            # Raise an error for now. As alternative we may ignore the
            # invalid id and continue with the others.
            raise InvalidBrokerIdError(
                "Broker id {} does not exist in cluster".format(broker_id),
            )
        broker.mark_decommissioned()
        affected_groups.add(broker.replication_group)
    # Rebalance each replication group that lost brokers.
    for group in affected_groups:
        self._decommission_brokers_in_group(group)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.