signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def project_delete_notif(self, tenant_id, tenant_name):
    """Handle a tenant-delete notification.

    Removes the tenant's edge router when the firewall support was
    initialised; otherwise this is a no-op.
    """
    if not self.fw_init:
        # Firewall subsystem never came up, so no router was created.
        return
    router_name = '{0}_{1}'.format(fw_constants.TENANT_EDGE_RTR, tenant_name)
    self.os_helper.delete_router_by_name(router_name, tenant_id)
def create_api_object_type(self):
    """Return a new ApiObjectType facade bound to this client's
    endpoint and credentials."""
    facade = ApiObjectType(
        self.networkapi_url, self.user, self.password, self.user_ldap)
    return facade
def show_dbs():
    """Return a tuple describing all databases and their collections.

    Each element is a dict ``{"name": <db name>, "collections": [...]}``
    with the internal ``system.indexes`` collection filtered out.
    Returns an empty tuple when the client cannot connect.

    (The original docstring said "list"; the function has always
    returned a tuple, so the documentation is corrected to match.)
    """
    mc = client_connector()
    if not mc:
        # The client couldn't connect.
        return ()
    result = []
    for db_name in mc.database_names():
        collections = mc[db_name].collection_names()
        collections = remove_values_from_list(collections, "system.indexes")
        result.append({"name": db_name, "collections": collections})
    return tuple(result)
def make_trigrams(i: Iterable[T]) -> Iterable[Tuple[Optional[T], Optional[T], Optional[T]]]:
    """Yield every trigram of *i*, padded at the edges.

    The sliding window starts filled with padding, so the first trigram
    is ``(pad, pad, first_item)``, and one trailing trigram with a
    single pad is emitted after the input is exhausted.  Nothing is
    yielded for an empty iterable.

    >>> t = set(make_trigrams('hi there'))
    """
    window = deque([None, None, None])

    def slide(item):
        # Push the new item in, drop the oldest, and snapshot the window,
        # substituting the pad character for the None placeholders.
        window.append(item)
        window.popleft()
        return tuple(c if c is not None else ' ' for c in window)

    for item in i:
        yield slide(item)
    if window[-1] is not None:
        # Input was non-empty: emit the final, right-padded trigram.
        yield slide(None)
def previous_day(self):
    """Return the HDate one Gregorian day before this one."""
    yesterday = self.gdate + datetime.timedelta(-1)
    return HDate(yesterday, self.diaspora, self.hebrew)
def get_max_counts(samples):
    """Retrieve number of regions that can be processed in parallel
    from the current samples.

    For each sample the base ``callable_count`` (default 1) is
    multiplied by the number of configured variant callers; the maximum
    over all samples is returned.  Returns 1 for an empty *samples*
    list instead of letting ``max()`` raise ValueError.
    """
    counts = []
    for data in (x[0] for x in samples):
        count = tz.get_in(["config", "algorithm", "callable_count"], data, 1)
        vcs = tz.get_in(["config", "algorithm", "variantcaller"], data, [])
        if isinstance(vcs, six.string_types):
            # A single caller may be configured as a bare string.
            vcs = [vcs]
        if vcs:
            count *= len(vcs)
        counts.append(count)
    # Guard against max() on an empty sequence; fall back to the
    # default callable_count of 1.
    return max(counts) if counts else 1
def reset_index(self, level=None, drop=False, inplace=False, col_level=0, col_fill=''):
    """Reset the index, or a level of it.

    Reset the index of the DataFrame, and use the default one instead.
    If the DataFrame has a MultiIndex, this method can remove one or
    more levels.

    Parameters
    ----------
    level : int, str, tuple, or list, default None
        Only remove the given levels from the index. Removes all levels
        by default.
    drop : bool, default False
        Do not try to insert index into dataframe columns. This resets
        the index to the default integer index.
    inplace : bool, default False
        Modify the DataFrame in place (do not create a new object).
    col_level : int or str, default 0
        If the columns have multiple levels, determines which level the
        labels are inserted into. By default it is inserted into the
        first level.
    col_fill : object, default ''
        If the columns have multiple levels, determines how the other
        levels are named. If None then the index name is repeated.

    Returns
    -------
    DataFrame
        DataFrame with the new index, or None when ``inplace=True``.

    See Also
    --------
    DataFrame.set_index : Opposite of reset_index.
    DataFrame.reindex : Change to new indices or expand indices.
    DataFrame.reindex_like : Change to same indices as other DataFrame.

    Examples
    --------
    >>> df = pd.DataFrame([('bird', 389.0), ('bird', 24.0)],
    ...                   index=['falcon', 'parrot'],
    ...                   columns=('class', 'max_speed'))
    >>> df.reset_index()
        index class  max_speed
    0  falcon  bird      389.0
    1  parrot  bird       24.0
    >>> df.reset_index(drop=True)
      class  max_speed
    0  bird      389.0
    1  bird       24.0
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    # Work on self directly when in place, otherwise on a copy.
    if inplace:
        new_obj = self
    else:
        new_obj = self.copy()

    def _maybe_casted_values(index, labels=None):
        # Extract the raw values of an index level, optionally selecting
        # by integer codes (labels), with dtype inference for object data.
        values = index._values
        if not isinstance(index, (PeriodIndex, DatetimeIndex)):
            if values.dtype == np.object_:
                values = lib.maybe_convert_objects(values)
        # if we have the labels, extract the values with a mask
        if labels is not None:
            mask = labels == -1
            # we can have situations where the whole mask is -1,
            # meaning there is nothing found in labels, so make all nan's
            if mask.all():
                values = np.empty(len(mask))
                values.fill(np.nan)
            else:
                values = values.take(labels)
                # TODO(https://github.com/pandas-dev/pandas/issues/24206)
                # Push this into maybe_upcast_putmask?
                # We can't pass EAs there right now. Looks a bit
                # complicated.
                # So we unbox the ndarray_values, op, re-box.
                values_type = type(values)
                values_dtype = values.dtype
                if issubclass(values_type, DatetimeLikeArray):
                    values = values._data
                if mask.any():
                    values, changed = maybe_upcast_putmask(values, mask, np.nan)
                if issubclass(values_type, DatetimeLikeArray):
                    values = values_type(values, dtype=values_dtype)
        return values

    # Default replacement index: a fresh RangeIndex-style integer index.
    new_index = ibase.default_index(len(new_obj))
    if level is not None:
        if not isinstance(level, (tuple, list)):
            level = [level]
        level = [self.index._get_level_number(lev) for lev in level]
        if len(level) < self.index.nlevels:
            # Only some levels removed: the remaining levels form the
            # new index instead of the default integer index.
            new_index = self.index.droplevel(level)
    if not drop:
        # The removed index level(s) become new leading columns.
        if isinstance(self.index, MultiIndex):
            names = [n if n is not None else ('level_%d' % i)
                     for (i, n) in enumerate(self.index.names)]
            to_insert = lzip(self.index.levels, self.index.codes)
        else:
            # Avoid clashing with an existing 'index' column.
            default = 'index' if 'index' not in self else 'level_0'
            names = ([default] if self.index.name is None
                     else [self.index.name])
            to_insert = ((self.index, None),)
        multi_col = isinstance(self.columns, MultiIndex)
        # Insert in reverse so the final column order matches the
        # original level order.
        for i, (lev, lab) in reversed(list(enumerate(to_insert))):
            if not (level is None or i in level):
                continue
            name = names[i]
            if multi_col:
                # Pad the column name out to the full number of column
                # levels, placing the label at col_level and filling the
                # other levels with col_fill.
                col_name = (list(name) if isinstance(name, tuple)
                            else [name])
                if col_fill is None:
                    if len(col_name) not in (1, self.columns.nlevels):
                        raise ValueError("col_fill=None is incompatible "
                                         "with incomplete column name "
                                         "{}".format(name))
                    col_fill = col_name[0]
                lev_num = self.columns._get_level_number(col_level)
                name_lst = [col_fill] * lev_num + col_name
                missing = self.columns.nlevels - len(name_lst)
                name_lst += [col_fill] * missing
                name = tuple(name_lst)
            # to ndarray and maybe infer different dtype
            level_values = _maybe_casted_values(lev, lab)
            new_obj.insert(0, name, level_values)
    new_obj.index = new_index
    if not inplace:
        return new_obj
def format_obj_keys(obj, formatter):
    """Recursively convert all dictionary keys using *formatter*.

    Lists are traversed and any nested dictionaries inside them are
    converted as well; any other value is returned unchanged.

    :param object obj: The object to convert
    :param function formatter: The formatting function for keys, which
        takes and returns a string
    :returns: A new object with keys converted
    :rtype: object

    :Example:
        >>> format_obj_keys({'one-key': 123}, lambda s: s.upper())
        {'ONE-KEY': 123}
    """
    # isinstance (rather than an exact type() comparison) also handles
    # subclasses such as OrderedDict or defaultdict.
    if isinstance(obj, list):
        return [format_obj_keys(item, formatter) for item in obj]
    if isinstance(obj, dict):
        return {formatter(k): format_obj_keys(v, formatter)
                for k, v in obj.items()}
    return obj
def load_mode(node):
    """Load one observing mode from its serialized *node* mapping."""
    obs_mode = ObservingMode()
    # Copy the raw node entries straight onto the instance attributes.
    vars(obs_mode).update(node)
    # Attach the optional components in turn: validator, builder, tagger.
    for loader in (load_mode_validator, load_mode_builder, load_mode_tagger):
        loader(obs_mode, node)
    return obs_mode
def hex_to_rgb(self, h):
    """Convert a valid hex color string to an ``(R, G, B)`` tuple."""
    return (self.hex_to_red(h), self.hex_to_green(h), self.hex_to_blue(h))
def identity_factor(self):
    """Return the identity factor for this factor.

    The identity factor shares the scope and cardinality of the
    original factor but assigns the value 1 to every assignment, so
    multiplying a factor by its identity factor yields the factor
    itself.

    Returns
    -------
    DiscreteFactor: The identity factor.

    Examples
    --------
    >>> from pgmpy.factors.discrete import DiscreteFactor
    >>> phi = DiscreteFactor(['x1', 'x2', 'x3'], [2, 3, 2], range(12))
    >>> phi_identity = phi.identity_factor()
    >>> phi_identity.variables
    ['x1', 'x2', 'x3']
    """
    ones = np.ones(self.values.size)
    return DiscreteFactor(self.variables, self.cardinality, ones)
def getEmbeddedKeyVal(cfgFileName, kwdName, dflt=None):
    """Read a config file and pull out the value of a given keyword.

    Returns *dflt* (when not None) if the keyword is absent; raises
    KeyError when the file cannot be parsed or the keyword is missing
    with no default.
    """
    # Assume this is a ConfigObj file.  Use that s/w to quickly read it and
    # put it in dict format.  Assume kwd is at top level (not in a section).
    # The input may also be a .cfgspc file.
    # Only use ConfigObj here as a tool to generate a dict from a file - do
    # not use the returned object as a ConfigObj per se.  As such, we can
    # call with "simple" format, ie. no cfgspc, no val'n, "list_values"=False.
    def _unfound_error():
        # Same message selection for a parse failure and a missing key:
        # the special task-name keyword gets the "can not parse" wording.
        if kwdName == TASK_NAME_KEY:
            return KeyError('Can not parse as a parameter config file: ' +
                            '\n\t' + os.path.realpath(cfgFileName))
        return KeyError('Unfound key "' + kwdName + '" while parsing: ' +
                        '\n\t' + os.path.realpath(cfgFileName))

    try:
        junkObj = configobj.ConfigObj(cfgFileName, list_values=False)
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit
        # propagate instead of being swallowed.
        raise _unfound_error()
    if kwdName in junkObj:
        return junkObj[kwdName]
    # Not found - fall back to the default when one was given.
    if dflt is not None:
        return dflt
    raise _unfound_error()
def delete_persistent_attributes(self):
    # type: () -> None
    """Delete the persistent attributes from the persistence layer.

    :rtype: None
    :raises: :py:class:`ask_sdk_core.exceptions.AttributesManagerException`
        if trying to delete persistence attributes without persistence
        adapter
    """
    if not self._persistence_adapter:
        raise AttributesManagerException(
            "Cannot delete PersistentAttributes without "
            "persistence adapter!")
    if not self._persistent_attributes_set:
        # Nothing was ever loaded or stored, so there is nothing to delete.
        return
    self._persistence_adapter.delete_attributes(
        request_envelope=self._request_envelope)
    self._persistence_attributes = {}
    self._persistent_attributes_set = False
def get_list_continuous_queries(self):
    """Get the list of continuous queries in InfluxDB.

    :return: all CQs in InfluxDB, one ``{database: [cq, ...]}`` dict per
        database
    :rtype: list of dictionaries
    """
    result = self.query("SHOW CONTINUOUS QUERIES")
    # Each series key is a tuple whose first element is the database name.
    return [{series_key[0]: list(points)}
            for series_key, points in result.items()]
def remove_bond(self, idx1, idx2):
    """Remove a bond from an openbabel molecule.

    Args:
        idx1: The atom index of one of the atoms participating the in bond
        idx2: The atom index of the other atom participating in the bond
    """
    for obbond in ob.OBMolBondIter(self._obmol):
        endpoints = (obbond.GetBeginAtomIdx(), obbond.GetEndAtomIdx())
        # Match the bond regardless of which end was given first.
        if endpoints == (idx1, idx2) or endpoints == (idx2, idx1):
            self._obmol.DeleteBond(obbond)
def wrap_function_name(self, name):
    """Split a long function name over multiple lines.

    Names of 32 characters or fewer are returned unchanged.
    """
    if len(name) <= 32:
        return name
    ratio = 2.0 / 3.0
    height = max(int(len(name) / (1.0 - ratio) + 0.5), 1)
    width = max(len(name) / height, 32)
    # TODO: break lines in symbols
    wrapped = textwrap.fill(name, width, break_long_words=False)
    # Compact the result: drop spaces after commas and between
    # closing template brackets (twice, to catch consecutive ones).
    wrapped = wrapped.replace(", ", ",")
    wrapped = wrapped.replace("> >", ">>")
    wrapped = wrapped.replace("> >", ">>")
    return wrapped
def send(self, api, force_send):
    """Send this item using api.

    :param api: D4S2Api sends messages to D4S2
    :param force_send: bool should we send even if the item already exists
    """
    item_id = self.get_existing_item_id(api)
    if not item_id:
        # Never sent before: create it, then send unconditionally.
        api.send_item(self.destination, self.create_item_returning_id(api),
                      force_send)
        return
    if force_send:
        api.send_item(self.destination, item_id, force_send)
        return
    # Already sent and no force flag: surface a warning-level error.
    item_type = D4S2Api.DEST_TO_NAME.get(self.destination, "Item")
    msg = "{} already sent. Run with --resend argument to resend."
    raise D4S2Error(msg.format(item_type), warning=True)
def all_stats(klass, account, ids, metric_groups, **kwargs):
    """Pull a list of metrics for a specified set of object IDs."""
    params = klass._standard_params(ids, metric_groups, **kwargs)
    resource = klass.RESOURCE_SYNC.format(account_id=account.id)
    request = Request(account.client, 'get', resource, params=params)
    return request.perform().body['data']
def get_subclass(cls, name):
    """Get a Benchmark subclass by name.

    :param name: name returned by ``Benchmark.name`` property
    :return: the matching ``Benchmark`` subclass
    :raises NameError: when no direct subclass carries *name*
    """
    match = next((sub for sub in cls.__subclasses__() if sub.name == name),
                 None)
    if match is None:
        raise NameError("Not a valid Benchmark class: " + name)
    return match
def get_absorbing_atom_symbol_index(absorbing_atom, structure):
    """Return the absorbing atom symbol and site index in the given structure.

    Args:
        absorbing_atom (str/int): symbol or site index
        structure (Structure)

    Returns:
        str, int: symbol and site index

    Raises:
        ValueError: when *absorbing_atom* is neither str nor int
    """
    if isinstance(absorbing_atom, str):
        # Symbol given: resolve to the first matching site index.
        site_index = structure.indices_from_symbol(absorbing_atom)[0]
        return absorbing_atom, site_index
    if isinstance(absorbing_atom, int):
        # Index given: look up the species symbol at that site.
        return str(structure[absorbing_atom].specie), absorbing_atom
    raise ValueError("absorbing_atom must be either specie symbol or site index")
def make_str_node(rawtext, app, prefixed_name, obj, parent, modname, options):
    """Render a Python object to text using str().

    :param rawtext: Text being replaced with the text node.
    :param app: Sphinx application context
    :param prefixed_name: The dotted Python name for obj.
    :param obj: The Python object to be rendered to text.
    :param parent: The parent Python object of obj.
    :param modname: The name of the module containing obj.
    :param options: Options dictionary passed to role func.
    """
    return nodes.Text(str(obj), rawsource=rawtext)
def get_subassistant_tree(self):
    """Return a tree-like structure representing the assistant hierarchy
    going down from this assistant to leaf assistants.

    For example: [(<This Assistant>,
                   [(<Subassistant 1>, [...]),
                    (<Subassistant 2>, [...])]

    The computed tree is cached on the instance (``self._tree``), so
    repeated calls return the same object.

    Returns:
        a tree-like structure (see above) representing assistant
        hierarchy going down from this assistant to leaf assistants
    """
    # hasattr is the idiomatic (and far cheaper) replacement for the
    # original "'_tree' not in dir(self)" membership test.
    if not hasattr(self, '_tree'):
        subassistant_tree = [sub.get_subassistant_tree()
                             for sub in self.get_subassistants()]
        self._tree = (self, subassistant_tree)
    return self._tree
def createStatus(self, repo_user, repo_name, sha, state, target_url=None,
                 context=None, issue=None, description=None):
    """Post *description* as a comment on a pull request.

    :param repo_user: GitHub user or organization
    :param repo_name: Name of the repository
    :param issue: Pull request number
    :param state: one of the following 'pending', 'success', 'error'
        or 'failure'.
    :param description: Short description of the status.
    :return: A deferred with the result from GitHub.

    This code comes from txgithub by @tomprince.
    txgithub is based on twisted's webclient agent, which is much less
    reliable and featureful as txrequest (support for proxy, connection
    pool, keep alive, retry, etc).

    NOTE(review): only ``description`` and ``issue`` are used here; the
    other parameters are accepted for interface compatibility — confirm
    against callers.
    """
    url = '/'.join(['/repos', repo_user, repo_name, 'issues', issue,
                    'comments'])
    return self._http.post(url, json={'body': description})
def indication(self, pdu):
    """Queue a request's data for delivery."""
    if _debug:
        TCPServer._debug("indication %r", pdu)
    # Append the PDU payload to the outgoing request buffer.
    self.request += pdu.pduData
def times_to_ms(h=0, m=0, s=0, ms=0):
    """Convert hours, minutes, seconds to milliseconds.

    Arguments may be positive or negative, int or float,
    need not be normalized (``s=120`` is okay).

    Returns:
        Number of milliseconds (rounded to int).
    """
    total = h * 3600000 + m * 60000 + s * 1000 + ms
    return int(round(total))
def do_unique(environment, value, case_sensitive=False, attribute=None):
    """Yield the unique items of the given iterable.

    .. sourcecode:: jinja

        {{ ['foo', 'bar', 'foobar', 'FooBar']|unique }}
            -> ['foo', 'bar', 'foobar']

    The unique items are yielded in the same order as their first
    occurrence in the iterable passed to the filter.

    :param case_sensitive: Treat upper and lower case strings as distinct.
    :param attribute: Filter objects with unique values for this attribute.
    """
    postprocess = None if case_sensitive else ignore_case
    getter = make_attrgetter(environment, attribute, postprocess=postprocess)
    seen = set()
    for item in value:
        key = getter(item)
        if key in seen:
            continue
        seen.add(key)
        yield item
def encap(self, pkt):
    """Encapsulate a frame using this Secure Association.

    :param pkt: an Ethernet frame; TypeError is raised for anything else
    :returns: a copy of the frame with a MACsec tag spliced in between
        the Ethernet header and the original payload
    """
    if pkt.name != Ether().name:
        raise TypeError('cannot encapsulate packet in MACsec, must be Ethernet')
    # noqa: E501
    # Deep-copy so the caller's packet is not mutated, then detach the
    # payload so the tag can be inserted between header and payload.
    hdr = copy.deepcopy(pkt)
    payload = hdr.payload
    del hdr.payload
    # Build the MACsec tag from this SA's state; the packet number is
    # masked to its low 32 bits (presumably the width of the MACsec pn
    # header field - confirm against the MACsec layer definition).
    tag = MACsec(sci=self.sci, an=self.an, SC=self.send_sci,
                 E=self.e_bit(), C=self.c_bit(),
                 shortlen=MACsecSA.shortlen(pkt),
                 pn=(self.pn & 0xFFFFFFFF), type=pkt.type)
    hdr.type = ETH_P_MACSEC
    return hdr / tag / payload
def _z2deriv(self, R, z, phi=0., t=0.):
    """Evaluate the second vertical derivative for this potential.

    NAME:
       _z2deriv
    PURPOSE:
       evaluate the second vertical derivative for this potential
    INPUT:
       R - Galactocentric cylindrical radius
       z - vertical height
       phi - azimuth
       t - time
    OUTPUT:
       the second vertical derivative
    HISTORY:
       2012-07-25 - Written - Bovy (IAS@MPIA)
    """
    sqrtbz = nu.sqrt(self._b2 + z**2.)
    asqrtbz = self._a + sqrtbz
    # sqrtbz == asqrtbz can only hold when self._a == 0; for scalar R
    # use the reduced closed form in that case.
    if isinstance(R, float) and sqrtbz == asqrtbz:
        return (self._b2 + R**2. - 2.*z**2.) * (self._b2 + R**2. + z**2.)**-2.5
    else:
        # Full closed-form expression in terms of a and b^2 (looks like
        # the Miyamoto-Nagai family - confirm against the class docs).
        return ((self._a**3.*self._b2
                 + self._a**2.*(3.*self._b2 - 2.*z**2.)
                 * nu.sqrt(self._b2 + z**2.)
                 + (self._b2 + R**2. - 2.*z**2.)*(self._b2 + z**2.)**1.5
                 + self._a*(3.*self._b2**2. - 4.*z**4.
                            + self._b2*(R**2. - z**2.)))
                / ((self._b2 + z**2.)**1.5
                   * (R**2. + asqrtbz**2.)**2.5))
def value_comparisons(self, values, comp="=", is_assignment=False):
    """Build out a series of value comparisons.

    :values: can either be a dictionary, in which case the return will
        compare a name to a named placeholder, using the comp argument.
        I.E. values = {"first_name": "John", "last_name": "Smith"} will
        return ["first_name = %(first_name)s", "last_name = %(last_name)s"].
        Otherwise values will be assumed to be an iterable of 2- or
        3-tuples in the form (column, value[, operator]).  When operator
        is not specified, it will fall back to comp.  So for instance
        values = [("first_name", "John"), ("id", (10, 100), "between")]
        will return ["first_name = %s", "id between %s and %s"].
    :is_assignment: if False, transform_op will be called on each
        operator.
    """
    if isinstance(values, dict):
        # Dict form: one named placeholder per key, optionally in a
        # deterministic (sorted) order.
        if self.sort_columns:
            keys = sorted(values.keys())
        else:
            keys = list(values.keys())
        params = zip(keys, [self.to_placeholder(k) for k in keys])
        # For non-assignments the operator may be rewritten based on the
        # value (e.g. for NULL handling) via transform_op.
        return [self.to_expression(i[0], i[1], comp if is_assignment
                                   else self.transform_op(comp, values[i[0]]))
                for i in params]
    else:
        # Tuple form: (column, value[, operator]) triples.
        if self.sort_columns:
            values = sorted(values, key=operator.itemgetter(0))
        comps = []
        for val in values:
            lhs = val[0]
            # Per-item operator overrides the default comp.
            op = val[2] if len(val) == 3 else comp
            if op == "raw":
                # Raw expressions take no placeholder at all.
                rhs = None
            elif op == "between":
                # BETWEEN needs a low/high placeholder pair.
                rhs = (self.to_placeholder(), self.to_placeholder())
            elif op == "in":
                # IN needs one placeholder per element of the value.
                rhs = [self.to_placeholder() for i in val[1]]
            else:
                rhs = self.to_placeholder()
            if not is_assignment:
                op = self.transform_op(op, val[1])
            comps.append(self.to_expression(lhs, rhs, op))
        return comps
def sorted_fancy_indexing(indexable, request):
    """Safe fancy indexing for unsorted index lists.

    Some objects, such as h5py datasets, only support list indexing
    if the list is sorted.  This helper sorts the requested indices,
    reads the corresponding elements, and re-shuffles the result back
    into the requested order.

    Parameters
    ----------
    request : list of int
        Unsorted list of example indices.
    indexable : any fancy-indexable object
        Indexable we'd like to do unsorted fancy indexing on.
    """
    if len(request) <= 1:
        # A single index needs no sorting gymnastics.
        return indexable[request]
    order = numpy.argsort(request)
    out = numpy.empty(shape=(len(request),) + indexable.shape[1:],
                      dtype=indexable.dtype)
    # Read in sorted order, scatter back into the requested order.
    out[order] = indexable[numpy.array(request)[order], ...]
    return out
def register_or_check(klass, finish, mean, between, refresh_presision,
                      configuration):
    """Return the active configurations."""
    m, created = klass.objects.get_or_create(finish=finish,
                                             configuration=configuration)
    if created:
        # Fresh row: store the supplied measurement parameters.
        m.mean = mean
        m.between = between
        m.refresh_presision = refresh_presision
        m.save()
        return m
    # Existing row: verify the stored values match (mean within a small
    # tolerance to absorb float/decimal round-trips).
    mean_matches = abs(float(m.mean) - mean) < 0.006
    if not (mean_matches and m.between == between
            and m.refresh_presision == refresh_presision):
        raise InvalidMeasurementError(
            "There are diferents values for the same measurement.")
    return m
def add_if_none_match(self):
    """Add the If-None-Match option to the request."""
    option = Option()
    # If-None-Match carries no value; its presence alone is the assertion.
    option.number = defines.OptionRegistry.IF_NONE_MATCH.number
    option.value = None
    self.add_option(option)
def download_isbn():
    """Look up an ISBN and metadata for every book title in raw.json.

    For each record, queries the LibraryThing thingTitle API with the
    title, extracts the first ISBN from the response, resolves it to
    metadata via isbnlib, and pretty-prints the result.  Sleeps one
    second between requests to stay polite to the API.

    (A large block of dead Python-2 experiment code, previously kept
    alive as a no-op triple-quoted string literal, has been removed.)
    """
    # Renamed from "input", which shadowed the builtin.
    with open('raw.json') as input_file:
        data = json.load(input_file)
    url = 'http://www.librarything.com/api/thingTitle/'
    for record in data:
        title = record['book']['title']
        final_url = url + quote_plus(title)
        content = requests.get(final_url).content
        soup = BeautifulSoup(content)
        extracted_title = soup.idlist.title.string
        isbn = soup.idlist.find("isbn").string
        metadata = isbnlib.meta(isbn)
        pprint(metadata)
        # print title, extracted_title, min(isbns)
        time.sleep(1)
def get_by_user_ldap(self, user_name):
    """Get user by the ldap name.

    :return: Dictionary with the following structure::

        {'usuario': [{'nome': <nome>,
                      'id': <id>,
                      'pwd': <pwd>,
                      'user': <user>,
                      'ativo': <ativo>,
                      'email': <email>,
                      'grupos': [nome_grupo, ...more user groups...],
                      'user_ldap': <user_ldap>}}

    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response.
    """
    url = 'user/get/ldap/{0}/'.format(user_name)
    code, xml = self.submit(None, 'GET', url)
    return self.response(code, xml)
def _flatten_output_shape ( input_shape , num_axis_to_keep = 1 ) :
"""Output shape of a flatten layer .""" | if num_axis_to_keep >= len ( input_shape ) :
raise ValueError ( "num_axis_to_keep[%d] should be less than input's rank[%d]" % ( num_axis_to_keep , len ( input_shape ) ) )
return tuple ( input_shape [ : num_axis_to_keep ] ) + ( reduce ( op . mul , input_shape [ num_axis_to_keep : ] , 1 ) , ) |
def _merge_defaults(self, data: dict) -> dict:
    """Merge provider default values into the notification data.

    Convenience wrapper around
    :func:`~notifiers.utils.helpers.merge_dicts`.

    :param data: Notification data
    :return: A merged dict of provided data with added defaults
    """
    log.debug("merging defaults %s into data %s", self.defaults, data)
    return merge_dicts(data, self.defaults)
def get_terms(desc, geneset, assoc, obo_dag, log):
    """Get the terms in the study group."""
    _chk_gene2go(assoc)
    term2itemids = defaultdict(set)
    genes = [gene for gene in geneset if gene in assoc]
    for gene in genes:
        # Only keep GO IDs present in the DAG, keyed by their main ID.
        for goid in assoc[gene]:
            if goid in obo_dag:
                term2itemids[obo_dag[goid].id].add(gene)
    if log is not None:
        num_stu = len(genes)
        num_pop = len(geneset)
        # Guard against a zero-sized geneset when computing the percentage.
        perc = 100.0 * num_stu / num_pop if num_pop != 0 else 0.0
        log.write(
            "{P:3.0f}% {N:>6,} of {M:>6,} {DESC} items found in association\n"
            .format(DESC=desc, N=num_stu, M=num_pop, P=perc))
    return term2itemids
def timeout(seconds):
    """Raise a TimeoutError if a function does not terminate within
    the specified number of seconds.

    Uses ``SIGALRM``, so it only works on Unix in the main thread.
    """
    def _raise_timeout(signum, frame):
        raise TimeoutError("Operation did not finish within \
        {} seconds".format(seconds))

    def decorate(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # Arm the alarm before the call and always disarm it after,
            # even when the wrapped function raises.
            signal.signal(signal.SIGALRM, _raise_timeout)
            signal.alarm(seconds)
            try:
                return func(*args, **kwargs)
            finally:
                signal.alarm(0)
        return wrapper
    return decorate
def subspace_detector_plot(detector, stachans, size, **kwargs):
    """Plotting for the subspace detector class.

    Plot the output basis vectors for the detector at the given
    dimension.  Corresponds to the first n horizontal vectors of the
    V matrix.

    :type detector: :class:`eqcorrscan.core.subspace.Detector`
    :type stachans: list
    :param stachans: List of tuples of (station, channel) to use.  Can
        set to 'all' to use all the station-channel pairs available.
        If detector is multiplexed, will just plot that.
    :type size: tuple
    :param size: Figure size.

    :returns: Figure
    :rtype: matplotlib.pyplot.Figure
    """
    # Imported lazily so the module can be used without a display backend.
    import matplotlib.pyplot as plt
    if stachans == 'all' and not detector.multiplex:
        stachans = detector.stachans
    elif detector.multiplex:
        # Multiplexed detectors hold a single combined trace.
        stachans = [('multi', ' ')]
    if np.isinf(detector.dimension):
        # Unbounded dimension: fall back to one row per design-set event.
        msg = ' '.join(['Infinite subspace dimension. Only plotting as many',
                        'dimensions as events in design set'])
        warnings.warn(msg)
        nrows = detector.v[0].shape[1]
    else:
        nrows = detector.dimension
    fig, axes = plt.subplots(nrows=nrows, ncols=len(stachans),
                             sharex=True, sharey=True, figsize=size)
    # Time axis in seconds; for multiplexed data the samples interleave
    # all station-channels, hence the extra division.
    x = np.arange(len(detector.u[0]), dtype=np.float32)
    if detector.multiplex:
        x /= len(detector.stachans) * detector.sampling_rate
    else:
        x /= detector.sampling_rate
    for column, stachan in enumerate(stachans):
        channel = detector.u[column]
        for row, vector in enumerate(channel.T[0:nrows]):
            # plt.subplots squeezes singleton dimensions, so index the
            # axes object according to the grid shape.
            if len(stachans) == 1:
                if nrows == 1:
                    axis = axes
                else:
                    axis = axes[row]
            else:
                axis = axes[row, column]
            if row == 0:
                axis.set_title('.'.join(stachan))
            axis.plot(x, vector, 'k', linewidth=1.1)
            if column == 0:
                axis.set_ylabel('Basis %s' % (row + 1), rotation=0)
            if row == nrows - 1:
                axis.set_xlabel('Time (s)')
            axis.set_yticks([])
    plt.subplots_adjust(hspace=0.05)
    plt.subplots_adjust(wspace=0.05)
    fig = _finalise_figure(fig=fig, **kwargs)
    # pragma: no cover
    return fig
def _setup_rpc(self):
    """Set up the report-state RPC client and start the periodic heartbeat."""
    self._state_rpc = agent_rpc.PluginReportStateAPI(topics.REPORTS)
    interval = CONF.AGENT.report_interval
    if not interval:
        # Reporting disabled: no heartbeat loop is started.
        return
    state_loop = loopingcall.FixedIntervalLoopingCall(self._report_state)
    state_loop.start(interval=interval)
def place_visual(self):
    """Place each visual object at the center of one of the four bin quadrants.

    Quadrant selection is driven by the object's index: indices 0 and 2 go to
    the low-x half, indices 0 and 1 to the low-y half.
    """
    bin_pos = string_to_array(self.bin2_body.get("pos"))
    bin_size = self.bin_size
    for index, (_, obj_mjcf) in enumerate(self.visual_objects):
        # Lower corner of this object's quadrant.
        x_low = bin_pos[0]
        y_low = bin_pos[1]
        if index in (0, 2):
            x_low -= bin_size[0] / 2
        if index < 2:
            y_low -= bin_size[1] / 2
        x_high = x_low + bin_size[0] / 2
        y_high = y_low + bin_size[1] / 2
        # Quadrant center (z is the bin height doubled, then halved below).
        quadrant_center = np.array([x_low + x_high, y_low + y_high, 2 * bin_pos[2]]) / 2.0
        # Shift so the object's bottom rests at the target position.
        pos = quadrant_center - obj_mjcf.get_bottom_offset()
        self.visual_obj_mjcf[index].set("pos", array_to_string(pos))
def com_google_fonts_check_family_panose_proportion(ttFonts):
    """Fonts have consistent PANOSE proportion?"""
    # Collect the distinct bProportion values across the whole family;
    # more than one distinct value means the family is inconsistent.
    proportions = {ttFont['OS/2'].panose.bProportion for ttFont in ttFonts}
    if len(proportions) > 1:
        yield FAIL, ("PANOSE proportion is not"
                     " the same accross this family."
                     " In order to fix this,"
                     " please make sure that the panose.bProportion value"
                     " is the same in the OS/2 table of all of this family"
                     " font files.")
    else:
        yield PASS, "Fonts have consistent PANOSE proportion."
async def setChatStickerSet(self, chat_id, sticker_set_name):
    """Set the sticker set of a supergroup.

    See: https://core.telegram.org/bots/api#setchatstickerset

    :param chat_id: target chat identifier
    :param sticker_set_name: name of the sticker set to assign
    """
    # NOTE: _strip(locals()) captures exactly the declared parameters as the
    # request payload -- do not introduce extra local variables above this line
    # or they would leak into the API request.
    p = _strip(locals())
    return await self._api_request('setChatStickerSet', _rectify(p))
def apriori(transactions, **kwargs):
    """Executes Apriori algorithm and returns a RelationRecord generator.

    Arguments:
        transactions -- A transaction iterable object
                        (eg. [['A', 'B'], ['B', 'C']]).

    Keyword arguments:
        min_support -- The minimum support of relations (float).
        min_confidence -- The minimum confidence of relations (float).
        min_lift -- The minimum lift of relations (float).
        max_length -- The maximum length of the relation (integer).
    """
    # Parse keyword arguments.
    min_support = kwargs.get('min_support', 0.1)
    min_confidence = kwargs.get('min_confidence', 0.0)
    min_lift = kwargs.get('min_lift', 0.0)
    max_length = kwargs.get('max_length', None)
    if min_support <= 0:
        raise ValueError('minimum support must be > 0')
    # Injection points used by the test suite.
    gen_records = kwargs.get('_gen_support_records', gen_support_records)
    gen_stats = kwargs.get('_gen_ordered_statistics', gen_ordered_statistics)
    filter_stats = kwargs.get('_filter_ordered_statistics', filter_ordered_statistics)
    # Compute supports, then derive and filter the ordered statistics.
    manager = TransactionManager.create(transactions)
    for record in gen_records(manager, min_support, max_length=max_length):
        statistics = list(filter_stats(
            gen_stats(manager, record),
            min_confidence=min_confidence,
            min_lift=min_lift,
        ))
        if statistics:
            yield RelationRecord(record.items, record.support, statistics)
def _open_sqlite ( db_file ) :
"""Opens database connection .""" | db_file = os . path . expanduser ( db_file )
try :
with open ( db_file ) : # test that the file can be accessed
pass
return sqlite3 . connect ( db_file , detect_types = sqlite3 . PARSE_DECLTYPES )
except ( IOError , sqlite3 . Error ) as err :
raise Dump2PolarionException ( "{}" . format ( err ) ) |
def has_listener(self, evt_name, fn):
    """Return True if ``fn`` is registered as a listener for ``evt_name``.

    :param evt_name: event name
    :param fn: the callback to look for
    """
    return fn in self.__get_listeners(evt_name)
def delete_key(self, key_id):
    """Delete user key pointed to by ``key_id``.

    :param int key_id: (required), unique id used by Github
    :returns: bool -- False when the key does not exist
    """
    key = self.key(key_id)
    return key.delete() if key else False
def rAsciiLine(ifile):
    """Return the next non-blank line in an ASCII file.

    :param ifile: an open text-mode file object
    :return: the next stripped non-empty line, or '' at end of file

    BUG FIX: the previous implementation looped forever at EOF, because
    ``readline()`` keeps returning '' (which strips to an empty line).
    """
    while True:
        line = ifile.readline()
        if not line:
            # EOF reached without finding a non-blank line.
            return ''
        line = line.strip()
        if line:
            return line
def _import_PREFIXCC(keyword=""):
    """List models from web catalog (prefix.cc) and ask which one to import.

    2015-10-10: originally part of main ontospy; now standalone only
    2016-06-19: eliminated dependency on extras.import_web

    :param keyword: optional filter; only prefixes/URIs containing it are shown.
    """
    SOURCE = "http://prefix.cc/popular/all.file.vann"
    options = []
    printDebug("----------\nReading source...")
    g = Ontospy(SOURCE, verbose=False)
    # NOTE(review): ``unicode`` implies a Python 2 target (or a compatibility
    # alias defined elsewhere in this module) -- confirm before porting.
    for x in g.all_ontologies:
        if keyword:
            if keyword in unicode(x.prefix).lower() or keyword in unicode(x.uri).lower():
                options += [(unicode(x.prefix), unicode(x.uri))]
        else:
            options += [(unicode(x.prefix), unicode(x.uri))]
    printDebug("----------\n%d results found." % len(options))
    counter = 1
    for x in options:
        print(Fore.BLUE + Style.BRIGHT + "[%d]" % counter,
              Style.RESET_ALL + x[0] + " ==> ", Fore.RED + x[1], Style.RESET_ALL)
        counter += 1
    # Interactive selection loop: 'q' quits, a number imports that entry.
    while True:
        var = input(Style.BRIGHT + "=====\nSelect ID to import: (q=quit)\n" + Style.RESET_ALL)
        if var == "q":
            break
        try:
            _id = int(var)
            ontouri = options[_id - 1][1]
            print(Fore.RED + "\n---------\n" + ontouri + "\n---------" + Style.RESET_ALL)
            action_analyze([ontouri])
            if click.confirm('=====\nDo you want to save to your local library?'):
                action_import(ontouri)
            return
        # BUG FIX: was a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; catch only ordinary errors
        # (non-numeric input, out-of-range id, download failure).
        except Exception:
            print("Error retrieving file. Import failed.")
            continue
def dump_request_and_response(self, response):
    """Return a string containing a nicely formatted representation of the request
    and response objects for logging and debugging.

    - Note: Does not work if the request or response body is a MultipartEncoder
      object.

    :param response: a ``requests`` Response object (with its request attached)
    :return: str dump of the request/response pair
    """
    # requests_toolbelt's dump_response() needs ``reason`` to be a string;
    # some transports leave it as None, so patch it in place first.
    if response.reason is None:
        response.reason = "<unknown>"
    return d1_client.util.normalize_request_response_dump(
        requests_toolbelt.utils.dump.dump_response(response))
def load_files(self, path):
    """Load files in a given path and all its subdirectories.

    Recursively walks ``path``; every ``.yaml``/``.yml`` file found is passed
    to ``self.unfold_yaml``. When ``self.verbose == 2`` each visited directory
    is printed.

    :param path: directory to index
    """
    if self.verbose == 2:
        print("Indexing {}".format(path))
    for entry in os.listdir(path):
        # Use os.path.join instead of manual "/" concatenation so the code is
        # correct on every platform.
        entry_path = os.path.join(path, entry)
        if os.path.isdir(entry_path):
            self.load_files(entry_path)
        elif entry.endswith((".yaml", ".yml")):
            self.unfold_yaml(entry_path)
def add_eager_constraints(self, models):
    """Set the constraints for an eager load of the relation.

    :param models: the parent models being eager loaded
    :type models: list
    """
    # Keep the parent models and build the lookup dictionary used later to
    # match eagerly-loaded results back to their parents.
    self._models = Collection.make(models)
    self._build_dictionary(models)
def set_auto_delete_disks(self, auto_delete_disks):
    """Enable/disable use of auto delete disks.

    :param auto_delete_disks: activate/deactivate auto delete disks (boolean)
    """
    message = ('Router "{name}" [{id}]: auto delete disks enabled'
               if auto_delete_disks
               else 'Router "{name}" [{id}]: auto delete disks disabled')
    log.info(message.format(name=self._name, id=self._id))
    self._auto_delete_disks = auto_delete_disks
def answer_challenge(authzr, client, responders):
    """Complete an authorization using a responder.

    :param ~acme.messages.AuthorizationResource authzr: The authorization to
        complete.
    :param .Client client: The ACME client.
    :type responders: List[`~txacme.interfaces.IResponder`]
    :param responders: A list of responders that can be used to complete the
        challenge with.
    :return: A deferred firing when the authorization is verified.
    """
    # Pick the first challenge any of the responders can handle.
    responder, challb = _find_supported_challenge(authzr, responders)
    response = challb.response(client.key)

    def _stop_responding():
        # Deferred-wrapped cleanup: tell the responder to stop serving the
        # challenge response.
        return maybeDeferred(
            responder.stop_responding, authzr.body.identifier.value, challb.chall, response)
    # Start responding, then notify the ACME server the challenge is ready.
    # NOTE(review): the final addCallback returns the _stop_responding
    # *callable* (it does not invoke it) -- presumably the caller is expected
    # to call it once validation finishes; confirm against call sites.
    return (
        maybeDeferred(
            responder.start_responding, authzr.body.identifier.value, challb.chall, response)
        .addCallback(lambda _: client.answer_challenge(challb, response))
        .addCallback(lambda _: _stop_responding))
def encodeValue(value):
    """Wrap ``value`` (or each item of a list/tuple) into AttributeValue
    messages with a string representation of the value.

    :return: list of ``common.AttributeValue``
    """
    items = value if isinstance(value, (list, tuple)) else [value]
    return [common.AttributeValue(string_value=str(item)) for item in items]
def darklyrics(song):
    """Returns the lyrics found in darklyrics for the specified mp3 file or an
    empty string if not found."""
    # Darklyrics relies on the album name
    if not hasattr(song, 'album') or not song.album:
        song.fetch_album_name()
    if not hasattr(song, 'album') or not song.album:
        # If we don't have the name of the album, there's nothing we can do
        # on darklyrics
        return ''
    # Normalize artist and album to the lowercase, URL-escaped form used in
    # darklyrics URLs.
    artist = song.artist.lower()
    artist = normalize(artist, URLESCAPES, '')
    album = song.album.lower()
    album = normalize(album, URLESCAPES, '')
    title = song.title
    url = 'http://www.darklyrics.com/lyrics/{}/{}.html'.format(artist, album)
    soup = get_url(url)
    text = ''
    # On the album page, every song is an <h3> header; its lyrics are the
    # loose text siblings that follow, up to the next <h3>.
    for header in soup.find_all('h3'):
        # NOTE(review): this rebinds ``song`` (the function parameter) to the
        # header text for the rest of the loop -- works, but confusing.
        song = str(header.get_text())
        next_sibling = header.next_sibling
        if song.lower().find(title.lower()) != -1:
            while next_sibling is not None and (next_sibling.name is None or next_sibling.name != 'h3'):
                if next_sibling.name is None:
                    # Nameless node == plain text: part of the lyrics.
                    text += str(next_sibling)
                next_sibling = next_sibling.next_sibling
    return text.strip()
def add_callback(self, callback: callable):
    """Add a callback on change

    :param callback: callable function
    :return: None
    """
    def internal_callback(*args):
        # Prefer a zero-argument callback; fall back to calling it with the
        # current value when the signature requires one.
        # NOTE(review): a TypeError raised *inside* callback() is also caught
        # here and triggers the fallback call -- confirm this is intended.
        try:
            callback()
        except TypeError:
            callback(self.get())
    # 'w' => fire on every write of the underlying Tk variable.
    self._var.trace('w', internal_callback)
def main():
    '''Main function to run the sensor with passed arguments'''
    # CLI arguments: trigger/echo GPIO pins, wait time between samples, and
    # number of samples to average.
    trig, echo, speed, samples = get_args()
    print('trig pin = gpio {}'.format(trig))
    print('echo pin = gpio {}'.format(echo))
    print('speed = {}'.format(speed))
    print('samples = {}'.format(samples))
    print('')
    # Take one averaged raw reading, then convert to both unit systems.
    value = sensor.Measurement(trig, echo)
    raw_distance = value.raw_distance(sample_size=samples, sample_wait=speed)
    imperial_distance = value.distance_imperial(raw_distance)
    metric_distance = value.distance_metric(raw_distance)
    print('The imperial distance is {} inches.'.format(imperial_distance))
    print('The metric distance is {} centimetres.'.format(metric_distance))
def _batch_norm_without_layers(self, input_layer, decay, use_scale, epsilon):
    """Batch normalization on `input_layer` without tf.layers.

    :param input_layer: 4-D input tensor (NHWC or NCHW per self.data_format)
    :param decay: moving-average decay for the running statistics
    :param use_scale: when True, learn a gamma scale variable
    :param epsilon: numerical-stability constant added to the variance
    :return: the batch-normalized tensor
    """
    # Channel axis depends on the data layout.
    shape = input_layer.shape
    num_channels = shape[3] if self.data_format == "NHWC" else shape[1]
    beta = self.get_variable("beta", [num_channels], tf.float32, tf.float32,
                             initializer=tf.zeros_initializer())
    if use_scale:
        gamma = self.get_variable("gamma", [num_channels], tf.float32, tf.float32,
                                  initializer=tf.ones_initializer())
    else:
        # Constant scale of 1.0 == no scaling.
        gamma = tf.constant(1.0, tf.float32, [num_channels])
    # Running statistics used at inference; excluded from the trainable set.
    moving_mean = tf.get_variable("moving_mean", [num_channels], tf.float32,
                                  initializer=tf.zeros_initializer(), trainable=False)
    moving_variance = tf.get_variable("moving_variance", [num_channels], tf.float32,
                                      initializer=tf.ones_initializer(), trainable=False)
    if self.phase_train:
        # Training: normalize with batch statistics and register moving-average
        # updates in the UPDATE_OPS collection so they run with the train op.
        bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
            input_layer, gamma, beta, epsilon=epsilon,
            data_format=self.data_format, is_training=True)
        mean_update = moving_averages.assign_moving_average(
            moving_mean, batch_mean, decay=decay, zero_debias=False)
        variance_update = moving_averages.assign_moving_average(
            moving_variance, batch_variance, decay=decay, zero_debias=False)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
        tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
    else:
        # Inference: normalize with the stored moving statistics.
        bn, _, _ = tf.nn.fused_batch_norm(
            input_layer, gamma, beta, mean=moving_mean, variance=moving_variance,
            epsilon=epsilon, data_format=self.data_format, is_training=False)
    return bn
def register(self, callback_id: str, handler: Any, name: str = "*") -> None:
    """Register a new handler for a specific :class:`slack.actions.Action` ``callback_id``.

    Optional routing based on the action name too. The name argument is useful
    for actions of type ``interactive_message`` to provide a different handler
    for each individual action.

    Args:
        callback_id: Callback_id the handler is interested in
        handler: Callback
        name: Name of the action (optional).
    """
    LOG.info("Registering %s, %s to %s", callback_id, name, handler)
    self._routes[callback_id].setdefault(name, []).append(handler)
def timezones_choices():
    """Timezones values and their labels for current locale.

    :return: an iterable of `(code, label)`, code being a timezone code and
        label the timezone name in current locale.
    """
    utcnow = pytz.utc.localize(datetime.utcnow())
    locale = _get_locale()
    for zone_name in sorted(pytz.common_timezones):
        zone = get_timezone(zone_name)
        localized = zone.normalize(utcnow.astimezone(zone))
        label = "({}) {}".format(get_timezone_gmt(localized, locale=locale), zone.zone)
        yield (zone, label)
def nn_poll(fds, timeout=-1):
    """nn_pollfds

    :param fds: dict (file descriptor => pollmode)
    :param timeout: timeout in milliseconds
    :return: (result code, dict of fd => returned events)
    """
    entries = []
    for fd, events in fds.items():
        pollfd = PollFds()
        pollfd.fd = fd
        pollfd.events = events
        pollfd.revents = 0
        entries.append(pollfd)
    # Pack the individual structs into one contiguous C array.
    poll_array = (PollFds * len(fds))(*entries)
    rc = _nn_poll(poll_array, len(fds), int(timeout))
    if rc <= 0:
        # Error or timeout: no per-fd results.
        return rc, {}
    return rc, {entry.fd: entry.revents for entry in poll_array}
def create_case(self, name, email, subject, description, businessImpact, priority, phone):
    """Send a case creation to SalesForces to create a ticket.

    @param name of the person creating the case.
    @param email of the person creating the case.
    @param subject of the case.
    @param description of the case.
    @param businessImpact of the case.
    @param priority of the case.
    @param phone of the person creating the case.
    @return Nothing if this is ok.
    @raise ServerError when something goes wrong.
    @raise ValueError when data passed in are invalid
    """
    # Validate all inputs up front so no request is sent with bad data.
    if not ('@' in parseaddr(email)[1]):
        raise ValueError('invalid email: {}'.format(email))
    if '' == name or name is None:
        raise ValueError('empty name')
    if '' == subject or subject is None:
        raise ValueError('empty subject')
    if '' == description or description is None:
        raise ValueError('empty description')
    if '' == businessImpact or businessImpact is None:
        raise ValueError('empty business impact')
    if priority is None:
        raise ValueError('Ensure the priority is from the set of '
                         'known priorities')
    if '' == phone or phone is None:
        raise ValueError('empty phone')
    try:
        # Web-to-Case style form POST; 'external': 1 marks the origin.
        r = requests.post(self.url,
                          data={'orgid': self.orgId,
                                'recordType': self.recordType,
                                'name': name,
                                'email': email,
                                'subject': subject,
                                'description': description,
                                self.BUSINESS_IMPACT: businessImpact,
                                'priority': priority,
                                'phone': phone,
                                'external': 1},
                          timeout=self.timeout)
        r.raise_for_status()
    except Timeout:
        message = 'Request timed out: {url} timeout: {timeout}'
        message = message.format(url=self.url, timeout=self.timeout)
        log.error(message)
        raise ServerError(message)
    except RequestException as err:
        # Any other HTTP/connection failure is surfaced as a ServerError.
        log.info('cannot create case: {}'.format(err))
        raise ServerError('cannot create case: {}'.format(err))
def validate_field_name(bases: List[Type['BaseModel']], field_name: str) -> None:
    """Ensure that the field's name does not shadow an existing attribute of the model."""
    for parent in bases:
        # A truthy attribute with the same name on any base means shadowing.
        if getattr(parent, field_name, None):
            raise NameError(
                f'Field name "{field_name}" shadows a BaseModel attribute; '
                f'use a different field name with "alias=\'{field_name}\'".'
            )
def _invoke_request_handler(self, function_name):
    """Request Handler for the Local Lambda Invoke path. This method is responsible for understanding the incoming
    request and invoking the Local Lambda Function

    Parameters
    ----------
    function_name str
        Name of the function to invoke

    Returns
    -------
    A Flask Response response object as if it was returned from Lambda
    """
    flask_request = request
    request_data = flask_request.get_data()
    if not request_data:
        # Lambda expects a JSON event; default to an empty object.
        request_data = b'{}'
    request_data = request_data.decode('utf-8')
    # Capture the function's stdout so its return value can be parsed out.
    stdout_stream = io.BytesIO()
    stdout_stream_writer = StreamWriter(stdout_stream, self.is_debugging)
    try:
        self.lambda_runner.invoke(function_name, request_data,
                                  stdout=stdout_stream_writer, stderr=self.stderr)
    except FunctionNotFound:
        LOG.debug('%s was not found to invoke.', function_name)
        return LambdaErrorResponses.resource_not_found(function_name)
    # Split the captured stream into the function's response and its logs.
    lambda_response, lambda_logs, is_lambda_user_error_response = LambdaOutputParser.get_lambda_output(stdout_stream)
    if self.stderr and lambda_logs:
        # Write the logs to stderr if available.
        self.stderr.write(lambda_logs)
    if is_lambda_user_error_response:
        # Mirror the real service: unhandled function errors still return 200
        # but carry the x-amz-function-error header.
        return self.service_response(lambda_response,
                                     {'Content-Type': 'application/json',
                                      'x-amz-function-error': 'Unhandled'},
                                     200)
    return self.service_response(lambda_response, {'Content-Type': 'application/json'}, 200)
def connectcomponents(idf, components, fluid=None):
    """rename nodes so that the components get connected

    fluid is only needed if there are air and water nodes
    fluid is Air or Water or ''.
    if the fluid is Steam, use Water

    :param idf: the IDF the components belong to
    :param components: list of (component, component-node) pairs, in flow order
    :param fluid: node fluid type, used to pick the right node field names
    :return: the same ``components`` list, with nodes renamed in place
    """
    if fluid is None:
        fluid = ''
    if len(components) == 1:
        # Single component: duplicate its outlet node name so it still reads
        # as a connected pair downstream.
        thiscomp, thiscompnode = components[0]
        initinletoutlet(idf, thiscomp, thiscompnode, force=False)
        outletnodename = getnodefieldname(thiscomp, "Outlet_Node_Name",
                                          fluid=fluid, startswith=thiscompnode)
        thiscomp[outletnodename] = [thiscomp[outletnodename], thiscomp[outletnodename]]
        # inletnodename = getnodefieldname(nextcomp, "Inlet_Node_Name", fluid)
        # nextcomp[inletnodename] = [nextcomp[inletnodename], betweennodename]
        return components
    # Multiple components: give each adjacent pair a shared "between" node so
    # the outlet of one is the inlet of the next.
    for i in range(len(components) - 1):
        thiscomp, thiscompnode = components[i]
        nextcomp, nextcompnode = components[i + 1]
        initinletoutlet(idf, thiscomp, thiscompnode, force=False)
        initinletoutlet(idf, nextcomp, nextcompnode, force=False)
        betweennodename = "%s_%s_node" % (thiscomp.Name, nextcomp.Name)
        outletnodename = getnodefieldname(thiscomp, "Outlet_Node_Name",
                                          fluid=fluid, startswith=thiscompnode)
        thiscomp[outletnodename] = [thiscomp[outletnodename], betweennodename]
        inletnodename = getnodefieldname(nextcomp, "Inlet_Node_Name", fluid)
        nextcomp[inletnodename] = [nextcomp[inletnodename], betweennodename]
    return components
def _list_model(self, model_cls: Type[X]) -> List[X]:
    """List the models in this class.

    :param model_cls: the mapped model class to query
    :return: all rows of ``model_cls`` as a list
    """
    # Delegates to the session's query interface (presumably SQLAlchemy).
    return self.session.query(model_cls).all()
def read(self):
    """Reads the data stored in the files we have been initialized with. It will
    ignore files that cannot be read, possibly leaving an empty configuration

    :return: Nothing
    :raise IOError: if a file cannot be handled
    """
    # Idempotent: only the first call does any work.
    if self._is_initialized:
        return
    self._is_initialized = True
    if not isinstance(self._file_or_files, (tuple, list)):
        files_to_read = [self._file_or_files]
    else:
        files_to_read = list(self._file_or_files)
    # end assure we have a copy of the paths to handle
    # ``seen`` guards against include cycles.
    seen = set(files_to_read)
    num_read_include_files = 0
    while files_to_read:
        file_path = files_to_read.pop(0)
        fp = file_path
        file_ok = False
        if hasattr(fp, "seek"):
            # Already a file-like object: read it directly.
            self._read(fp, fp.name)
        else:
            # assume a path if it is not a file-object
            try:
                with open(file_path, 'rb') as fp:
                    file_ok = True
                    self._read(fp, fp.name)
            except IOError:
                # Unreadable files are silently skipped by design.
                continue
        # Read includes and append those that we didn't handle yet
        # We expect all paths to be normalized and absolute (and will assure that is the case)
        if self._has_includes():
            for _, include_path in self.items('include'):
                if include_path.startswith('~'):
                    include_path = osp.expanduser(include_path)
                if not osp.isabs(include_path):
                    if not file_ok:
                        continue
                    # end ignore relative paths if we don't know the configuration file path
                    assert osp.isabs(file_path), "Need absolute paths to be sure our cycle checks will work"
                    include_path = osp.join(osp.dirname(file_path), include_path)
                # end make include path absolute
                include_path = osp.normpath(include_path)
                if include_path in seen or not os.access(include_path, os.R_OK):
                    continue
                seen.add(include_path)
                # insert included file to the top to be considered first
                files_to_read.insert(0, include_path)
                num_read_include_files += 1
            # each include path in configuration file
        # end handle includes
    # END for each file object to read
    # If there was no file included, we can safely write back (potentially) the configuration file
    # without altering it's meaning
    if num_read_include_files == 0:
        self._merge_includes = False
def delete_stack(self, stack_name):
    """Teardown a stack.

    :param stack_name: name of the stack to delete
    """
    # Fails fast (inside get_stack) when the stack does not exist.
    get_stack(stack_name)
    CLIENT.delete_stack(StackName=stack_name)
    # Block until the service reports the deletion finished.
    DELETE_WAITER.wait(StackName=stack_name)
def support_false_positive_count(m, m_hat):
    """Count the number of false positive support elements in
    m_hat in one triangle, not including the diagonal."""
    _, nnz_m_hat, nnz_common = _nonzero_intersection(m, m_hat)
    # Elements nonzero in m_hat but not in m; halve to count one triangle.
    false_positives = nnz_m_hat - nnz_common
    return int(false_positives / 2.0)
def _sanitize_to_unicode(obj):
    """Convert all strings records of the object to unicode

    :param obj: object to sanitize to unicode.
    :type obj: object
    :return: Unicode string representation of the given object.
    :rtype: str
    """
    if isinstance(obj, dict):
        # Recurse into both keys and values, preserving the dict shape.
        return dict(
            (BaseGELFHandler._sanitize_to_unicode(k), BaseGELFHandler._sanitize_to_unicode(v))
            for k, v in obj.items())
    if isinstance(obj, (list, tuple)):
        # Rebuild the same sequence type with sanitized items.
        return obj.__class__([BaseGELFHandler._sanitize_to_unicode(i) for i in obj])
    if isinstance(obj, data):
        # NOTE(review): ``data`` is presumably a module-level alias for the
        # byte-string type (``bytes``) -- confirm where it is defined.
        obj = obj.decode('utf-8', errors='replace')
    return obj
def chip_as_adjacency_list(device: 'cirq.google.XmonDevice',
                           ) -> Dict[GridQubit, List[GridQubit]]:
    """Gives adjacency list representation of a chip.

    The adjacency list is constructed in order of above, left_of, below and
    right_of consecutively.

    Args:
        device: Chip to be converted.

    Returns:
        Map from nodes to list of qubits which represent all the neighbours of
        given qubit.
    """
    qubits_on_chip = set(device.qubits)
    adjacency = {}  # type: Dict[GridQubit, List[GridQubit]]
    for node in device.qubits:
        # Keep only the candidate neighbours actually present on the chip,
        # preserving the above/left_of/below/right_of ordering.
        adjacency[node] = [
            neighbour
            for neighbour in (above(node), left_of(node), below(node), right_of(node))
            if neighbour in qubits_on_chip
        ]
    return adjacency
def gen_checkbox_list(sig_dic):
    '''For generating List view HTML file for CHECKBOX.
    for each item.'''
    # Build a <span> of tornado-template conditionals: each option label is
    # rendered only when its key is present in the post's extinfo field.
    # (The doubled braces {{% ... %}} emit literal {% ... %} after .format.)
    view_zuoxiang = '''<span class="iga_pd_val">'''
    dic_tmp = sig_dic['dic']
    for key in dic_tmp.keys():
        tmp_str = '''{{% if "{0}" in postinfo.extinfo["{1}"] %}} {2} {{% end %}}
'''.format(key, sig_dic['en'], dic_tmp[key])
        view_zuoxiang += tmp_str
    view_zuoxiang += '''</span>'''
    return view_zuoxiang
def _load_area_def(self, dsid, file_handlers):
    """Load the area definition of *dsid*."""
    # Collect per-file area definitions, dropping handlers without one.
    candidate_defs = (fh.get_area_def(dsid) for fh in file_handlers)
    usable_defs = [area for area in candidate_defs if area is not None]
    # Stack them and collapse to a single definition when possible.
    return StackedAreaDefinition(*usable_defs).squeeze()
def commitreturn(self, qstring, vals=()):
    "commit and return result. This is intended for sql UPDATE...RETURNING"
    # self.withcur() presumably yields a cursor whose transaction commits on
    # clean exit -- confirm against withcur's definition.
    with self.withcur() as cur:
        cur.execute(qstring, vals)
        # Single row produced by the RETURNING clause.
        return cur.fetchone()
def GetParserFromFilename(self, path):
    """Returns the appropriate parser class from the filename."""
    # First try to match an explicit scheme prefix (e.g. "http://...").
    scheme = path.split("://")[0]
    matched = next(
        (cls for cls in itervalues(GRRConfigParser.classes) if cls.name == scheme),
        None)
    if matched is not None:
        return matched
    # Otherwise fall back to the file extension.
    if os.path.splitext(path)[1] in (".yaml", ".yml"):
        return YamlParser
    return ConfigFileParser
def ssml_w(self, words, role=None, **kwargs):
    """Create a <W> element

    :param words: Words to speak
    :param role: Customize the pronunciation of words by specifying the word's part of speech or alternate meaning
    :param kwargs: additional attributes
    :returns: <W> element
    """
    # Build the SsmlW node and attach it as a child of this element.
    return self.nest(SsmlW(words, role=role, **kwargs))
def convert_to_jbig2(pike, jbig2_groups, root, log, options):
    """Convert images to JBIG2 and insert into PDF.

    When the JBIG2 page group size is > 1 we do several JBIG2 images at once
    and build a symbol dictionary that will span several pages. Each JBIG2
    image must reference to its symbol dictionary. If too many pages shared the
    same dictionary JBIG2 encoding becomes more expensive and less efficient.
    The default value of 10 was determined through testing. Currently this
    must be lossy encoding since jbig2enc does not support refinement coding.

    When the JBIG2 symbolic coder is not used, each JBIG2 stands on its own
    and needs no dictionary. Currently this must be lossless JBIG2.
    """
    # Run the external encoder first; it writes per-group files under ``root``.
    _produce_jbig2_images(jbig2_groups, root, log, options)
    for group, xref_exts in jbig2_groups.items():
        prefix = f'group{group:08d}'
        jbig2_symfile = root / (prefix + '.sym')
        if jbig2_symfile.exists():
            # Shared symbol dictionary: embed it once as a JBIG2Globals stream.
            jbig2_globals_data = jbig2_symfile.read_bytes()
            jbig2_globals = pikepdf.Stream(pike, jbig2_globals_data)
            jbig2_globals_dict = Dictionary(JBIG2Globals=jbig2_globals)
        elif options.jbig2_page_group_size == 1:
            # Standalone images: no dictionary needed.
            jbig2_globals_dict = None
        else:
            # Grouped mode requires the symbol file the encoder should have produced.
            raise FileNotFoundError(jbig2_symfile)
        for n, xref_ext in enumerate(xref_exts):
            xref, _ = xref_ext
            jbig2_im_file = root / (prefix + f'.{n:04d}')
            jbig2_im_data = jbig2_im_file.read_bytes()
            # Replace the original image stream with the JBIG2-encoded data.
            im_obj = pike.get_object(xref, 0)
            im_obj.write(jbig2_im_data, filter=Name.JBIG2Decode,
                         decode_parms=jbig2_globals_dict)
def Call(func_name, args=None, prefix=None):
    """A function call

    Builds a lib2to3 ``power`` node representing ``func_name(args)``.

    :param func_name: leaf/node for the callable's name
    :param args: argument nodes forwarded to ArgList
        (NOTE(review): passed through as-is -- confirm ArgList handles None)
    :param prefix: optional whitespace/comment prefix for the new node
    """
    node = Node(syms.power, [func_name, ArgList(args)])
    if prefix is not None:
        node.prefix = prefix
    return node
def parse_charset(header_string):
    '''Parse a "Content-Type" string for the document encoding.

    Returns:
        str, None
    '''
    pattern = r'''charset[ ]?=[ ]?["']?([a-z0-9_-]+)'''
    match = re.search(pattern, header_string, re.IGNORECASE)
    return match.group(1) if match else None
async def run_asgi(self):
    """Wrapper around the ASGI callable, handling exceptions and unexpected
    termination states.
    """
    try:
        result = await self.app(self.scope, self.asgi_receive, self.asgi_send)
    except BaseException as exc:
        # The application crashed: mark the connection closed and log.
        self.closed_event.set()
        msg = "Exception in ASGI application\n"
        self.logger.error(msg, exc_info=exc)
        if not self.handshake_started_event.is_set():
            # The app never began the websocket handshake; reply with a 500.
            self.send_500_response()
        else:
            # Handshake already in flight; wait for it before tearing down.
            await self.handshake_completed_event.wait()
        self.transport.close()
    else:
        self.closed_event.set()
        if not self.handshake_started_event.is_set():
            # App returned without ever accepting/rejecting the connection.
            msg = "ASGI callable returned without sending handshake."
            self.logger.error(msg)
            self.send_500_response()
            self.transport.close()
        elif result is not None:
            # ASGI spec: the callable must return None.
            msg = "ASGI callable should return None, but returned '%s'."
            self.logger.error(msg, result)
            await self.handshake_completed_event.wait()
            self.transport.close()
def determine_num_chunks(chunk_size, file_size):
    """Figure out how many pieces we are sending the file in.

    NOTE: duke-data-service requires an empty chunk to be uploaded for empty files.

    :param chunk_size: int: size of each chunk in bytes (> 0)
    :param file_size: int: total size of the file in bytes
    :return: int: number of chunks, always >= 1
    """
    if file_size == 0:
        # An empty file still needs one (empty) chunk uploaded.
        return 1
    # Integer ceiling division. The previous float-based math.ceil loses
    # precision for sizes above 2**53 bytes; this is exact for any int.
    return int(-(-file_size // chunk_size))
def mentioned_in(self, message):
    """Checks if the user is mentioned in the specified message.

    Parameters
    ----------
    message: :class:`Message`
        The message to check if you're mentioned in.
    """
    # @everyone mentions everyone by definition.
    if message.mention_everyone:
        return True
    return any(member.id == self.id for member in message.mentions)
def _load ( self ) :
"""Load the database from its ` ` dbfile ` ` if it has one""" | if self . dbfile is not None :
with open ( self . dbfile , 'r' ) as f :
self . _db = json . loads ( f . read ( ) )
else :
self . _db = { } |
def install_json_output_variables(self, ij=None):
    """Return install.json output variables in a dict with name param as key.

    Args:
        ij (dict, optional): Defaults to None. The install.json contents.

    Returns:
        dict: A dictionary containing the install.json output variables with name as key.
    """
    # Rebuild when not yet cached, or when explicit contents are supplied.
    if self._install_json_output_variables is None or ij is not None:
        self._install_json_output_variables = {}
        if ij is None:
            # TODO: currently there is no support for projects with multiple install.json files.
            ij = self.install_json
        outputs = ij.get('playbook', {}).get('outputVariables') or []
        for variable in outputs:
            self._install_json_output_variables.setdefault(variable.get('name'), []).append(variable)
    return self._install_json_output_variables
def AppConfigFlagHandler(feature=None):
    """This is the default handler. It checks for feature flags in the current app's configuration.

    For example, to have 'unfinished_feature' hidden in production but active in development:

    config.py

    class ProductionConfig(Config):
        FEATURE_FLAGS = {
            'unfinished_feature': False,
        }

    class DevelopmentConfig(Config):
        FEATURE_FLAGS = {
            'unfinished_feature': True,
        }

    :param feature: name of the feature flag to look up
    :return: the configured flag value, or False outside an app context
    :raise NoFeatureFlagFound: when the flag (or the config section) is missing
    """
    if not current_app:
        # Outside the Flask application context we cannot read config.
        log.warn(u"Got a request to check for {feature} but we're outside the request context. Returning False".format(feature=feature))
        return False
    try:
        return current_app.config[FEATURE_FLAGS_CONFIG][feature]
    except (AttributeError, KeyError):
        # Missing section or missing flag -- let the caller decide the default.
        raise NoFeatureFlagFound()
def get_X_spline(x, knots, n_bases=10, spline_order=3, add_intercept=True):
    """Build a B-spline design matrix by evaluating each basis function at ``x``.

    Args:
        x: 1-D array of evaluation points.
        knots: knot vector; must have length ``n_bases + spline_order + 1``.
        n_bases: number of spline basis functions.
        spline_order: spline order (3 = cubic).
        add_intercept: when True, prepend a column of ones.

    Returns:
        np.array of shape [len(x), n_bases + (add_intercept)], dtype float32.

    # BSpline formula
    https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.BSpline.html#scipy.interpolate.BSpline
    Fortran code:
    https://github.com/scipy/scipy/blob/v0.19.0/scipy/interpolate/fitpack/splev.f
    """
    # BUG FIX: was ``len(x.shape) is not 1`` -- identity comparison with an int
    # literal (only works due to CPython small-int caching); use ``!=``.
    if len(x.shape) != 1:
        raise ValueError("x has to be 1 dimentional")
    tck = [knots, np.zeros(n_bases), spline_order]
    X = np.zeros([len(x), n_bases])
    for i in range(n_bases):
        # Evaluate the i-th basis function via a one-hot coefficient vector.
        vec = np.zeros(n_bases)
        vec[i] = 1.0
        tck[1] = vec
        X[:, i] = si.splev(x, tck, der=0)
    if add_intercept is True:
        ones = np.ones_like(X[:, :1])
        X = np.hstack([ones, X])
    return X.astype(np.float32)
def safe_unicode(string):
    """Return `string` as-is on Python 3.

    On Python 2, replace the right single quotation mark (U+2019) with an
    ASCII apostrophe and return the UTF-8 encoded bytes.
    """
    if PY3:
        return string
    return string.replace(u'\u2019', "'").encode('utf-8')
def health(self, indices=None, level="cluster", wait_for_status=None,
           wait_for_relocating_shards=None, timeout=30):
    """Check the current :ref:`cluster health <es-guide-reference-api-admin-cluster-health>`.

    Request Parameters

    The cluster health API accepts the following request parameters:

    :param indices: optional list of index names to restrict the check to.
    :param level: Can be one of cluster, indices or shards. Controls the
                  details level of the health information returned.
                  Defaults to *cluster*.
    :param wait_for_status: One of green, yellow or red. Will wait (until
                            the timeout provided) until the status of the
                            cluster changes to the one provided.
                            By default, will not wait for any status.
    :param wait_for_relocating_shards: A number controlling to how many
                                       relocating shards to wait for.
                                       Usually will be 0 to indicate to
                                       wait till all relocation have
                                       happened. Defaults to not to wait.
    :param timeout: A time based parameter controlling how long to wait
                    if one of the wait_for_XXX are provided.
                    Defaults to 30s.
    """
    if indices:
        path = make_path("_cluster", "health", ",".join(indices))
    else:
        path = make_path("_cluster", "health")
    mapping = {}
    if level != "cluster":
        if level not in ["cluster", "indices", "shards"]:
            raise ValueError("Invalid level: %s" % level)
        mapping['level'] = level
    if wait_for_status:
        if wait_for_status not in ["green", "yellow", "red"]:
            raise ValueError("Invalid wait_for_status: %s" % wait_for_status)
        mapping['wait_for_status'] = wait_for_status
    # BUG FIX: this parameter was documented and accepted but silently
    # dropped -- it never reached the request parameters.  0 is a
    # meaningful value ("wait until all relocation is done"), so compare
    # against None rather than truthiness.
    if wait_for_relocating_shards is not None:
        mapping['wait_for_relocating_shards'] = wait_for_relocating_shards
    mapping['timeout'] = "%ds" % timeout
    return self.conn._send_request('GET', path, params=mapping)
def select_db(self, db):
    '''Set current db.

    Sends a COM_INIT_DB command selecting `db` and then consumes the
    server's OK packet.  Written as a generator-based coroutine: each
    ``yield`` suspends until the underlying protocol step completes
    (presumably driven by an inlineCallbacks/gen-style decorator at the
    call site -- confirm against the surrounding class).
    '''
    # Order matters: the OK packet must be read after the command is sent
    # so the connection is left clean for the next request.
    yield self._execute_command(COMMAND.COM_INIT_DB, db)
    yield self._read_ok_packet()
def new(self, attribute, operation=ChainOperator.AND):
    """Combine with a new query

    :param str attribute: attribute of new query
    :param ChainOperator operation: operation to combine to new query
    :rtype: Query
    """
    # Accept either a ChainOperator member or its string value.
    if isinstance(operation, str):
        operation = ChainOperator(operation)
    self._chain = operation
    self._negation = False
    # Reset the working attribute; empty/None attribute clears it.
    if attribute:
        self._attribute = self._get_mapping(attribute)
    else:
        self._attribute = None
    return self
def tag_related(self, query, category=None):
    """Get related tags.

    Parameters:
        query (str): The tag to find the related tags for.
        category (str): If specified, show only tags of a specific
            category.  Can be: General 0, Artist 1, Copyright 3 and
            Character 4.
    """
    return self._get('related_tag.json',
                     {'query': query, 'category': category})
def union(self, rdds):
    """Build the union of a list of RDDs.

    This supports unions() of RDDs with different serialized formats,
    although this forces them to be reserialized using the default
    serializer:

    >>> path = os.path.join(tempdir, "union-text.txt")
    >>> with open(path, "w") as testFile:
    ...    _ = testFile.write("Hello")
    >>> textFile = sc.textFile(path)
    >>> textFile.collect()
    [u'Hello']
    >>> parallelized = sc.parallelize(["World!"])
    >>> sorted(sc.union([textFile, parallelized]).collect())
    [u'Hello', 'World!']
    """
    # If any RDD uses a different deserializer than the first one, force
    # all of them back to the default serializer so the JVM union is
    # consistent.
    ref_deserializer = rdds[0]._jrdd_deserializer
    if any(r._jrdd_deserializer != ref_deserializer for r in rdds):
        rdds = [r._reserialize() for r in rdds]
    # Marshal the Python RDD handles into a JVM JavaRDD[] array.
    jrdd_cls = SparkContext._jvm.org.apache.spark.api.java.JavaRDD
    jrdds = SparkContext._gateway.new_array(jrdd_cls, len(rdds))
    for idx, rdd in enumerate(rdds):
        jrdds[idx] = rdd._jrdd
    return RDD(self._jsc.union(jrdds), self, rdds[0]._jrdd_deserializer)
def user_exists_in_group(user_name, group_name, region=None, key=None, keyid=None, profile=None):
    '''
    Check if user exists in group.

    .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt myminion boto_iam.user_exists_in_group myuser mygroup
    '''
    # TODO this should probably use boto.iam.get_groups_for_user
    members = get_group_members(group_name=group_name, region=region,
                                key=key, keyid=keyid, profile=profile)
    for member in members or []:
        if member['user_name'] == user_name:
            log.debug('IAM user %s is already in IAM group %s.', user_name, group_name)
            return True
    return False
def delete(self, space_no, *args):
    """delete tuple by primary key

    Builds a DELETE request for space `space_no` keyed by `args`, writes
    it to the transport and returns a Deferred that fires with the parsed
    server reply.
    """
    # Reserve a Deferred from the reply queue first: its _ipro_request_id
    # is embedded in the packet so the response can be matched back to
    # this request.
    d = self.replyQueue.get()
    packet = RequestDelete(self.charset, self.errors, d._ipro_request_id, space_no, 0, *args)
    self.transport.write(bytes(packet))
    # Decode the raw reply using the connection's charset before handing
    # it to downstream callbacks.
    return d.addCallback(self.handle_reply, self.charset, self.errors, None)
def batch(samples):
    """CWL: batch together per sample, joint and germline calls for ensemble combination.

    Sets up groups of same sample/batch variant calls for ensemble calling, as
    long as we have more than one caller per group.
    """
    samples = [utils.to_single_data(x) for x in samples]
    # Remember the input ordering so the output can be sorted back to it.
    sample_order = [dd.get_sample_name(x) for x in samples]
    batch_groups = collections.defaultdict(list)
    for data in samples:
        # Samples without an explicit batch fall back to grouping by their
        # own sample name.
        batch_samples = tuple(data.get("batch_samples", [dd.get_sample_name(data)]))
        batch_groups[(batch_samples, dd.get_phenotype(data))].append(data)
    out = []
    for (batch_samples, phenotype), gsamples in batch_groups.items():
        # Ensemble combination only makes sense with more than one caller
        # in the group.
        if len(gsamples) > 1:
            batches = set([])
            for d in gsamples:
                batches |= set(dd.get_batches(d))
            # Deterministic caller ordering for reproducible ensemble input.
            gsamples.sort(key=dd.get_variantcaller_order)
            # Use the first sample as the template for the merged record.
            cur = copy.deepcopy(gsamples[0])
            cur.update({"batch_id": sorted(list(batches))[0] if batches else "_".join(batch_samples),
                        "batch_samples": batch_samples,
                        "variants": {"variantcallers": [dd.get_variantcaller(d) for d in gsamples],
                                     "calls": [d.get("vrn_file") for d in gsamples]}})
            out.append(cur)
    def by_original_order(d):
        # Sort groups by the earliest position of any member sample in the
        # original input order.
        return min([sample_order.index(s) for s in d["batch_samples"] if s in sample_order])
    return sorted(out, key=by_original_order)
def _PrintAnalysisStatusHeader ( self , processing_status ) :
"""Prints the analysis status header .
Args :
processing _ status ( ProcessingStatus ) : processing status .""" | self . _output_writer . Write ( 'Storage file\t\t: {0:s}\n' . format ( self . _storage_file_path ) )
self . _PrintProcessingTime ( processing_status )
if processing_status and processing_status . events_status :
self . _PrintEventsStatus ( processing_status . events_status )
self . _output_writer . Write ( '\n' ) |
def _AddEqualsMethod(message_descriptor, cls):
    """Helper for _AddMessageMethods().

    Installs an ``__eq__`` implementation on the generated message class
    `cls`.  `message_descriptor` is unused here but kept for the shared
    _Add*Method() helper signature.
    """
    def __eq__(self, other):
        # Different message types (or non-messages) never compare equal.
        if (not isinstance(other, message_mod.Message) or other.DESCRIPTOR != self.DESCRIPTOR):
            return False
        # Identity fast path.
        if self is other:
            return True
        # google.protobuf.Any messages compare by their unpacked payloads
        # when both sides can be unpacked.
        if self.DESCRIPTOR.full_name == _AnyFullTypeName:
            any_a = _InternalUnpackAny(self)
            any_b = _InternalUnpackAny(other)
            if any_a and any_b:
                return any_a == any_b
        # Compare all set fields.
        if not self.ListFields() == other.ListFields():
            return False
        # Sort unknown fields because their order shouldn't affect equality test.
        unknown_fields = list(self._unknown_fields)
        unknown_fields.sort()
        other_unknown_fields = list(other._unknown_fields)
        other_unknown_fields.sort()
        return unknown_fields == other_unknown_fields
    cls.__eq__ = __eq__
def convert_sequence_to_motor_units(cycles, unit_converter):
    """Converts a move sequence to motor units.

    Converts a move sequence to motor units using the provided converter.

    Parameters
    ----------
    cycles : iterable of dicts
        The iterable of cycles of motion to do one after another.  See
        ``compile_sequence`` for format.
    unit_converter : UnitConverter
        ``GeminiMotorDrive.utilities.UnitConverter`` to use to convert
        the units in `cycles` to motor units.

    Returns
    -------
    motor_cycles : list of dicts
        A deep copy of `cycles` with all units converted to motor units.

    See Also
    --------
    compile_sequence
    GeminiMotorDrive.utilities.UnitConverter
    """
    # Work on a deep copy so the caller's sequence is left untouched.
    converted = copy.deepcopy(cycles)
    to_va = unit_converter.to_motor_velocity_acceleration
    to_dist = unit_converter.to_motor_distance
    for cycle in converted:
        for move in cycle['moves']:
            # Acceleration, deceleration and velocity share the same
            # conversion; distance is converted and truncated to an int.
            for key in ('A', 'AD', 'V'):
                move[key] = to_va(move[key])
            move['D'] = int(to_dist(move['D']))
    return converted
def _format_subtree ( self , subtree ) :
"""Recursively format all subtrees .""" | subtree [ 'children' ] = list ( subtree [ 'children' ] . values ( ) )
for child in subtree [ 'children' ] :
self . _format_subtree ( child )
return subtree |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.