signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def groupby(self, key_column_names, operations, *args):
    """Perform a group on the key_column_names followed by aggregations on
    the columns listed in operations.

    The operations parameter is a dictionary that indicates which
    aggregation operators to use and which columns to use them on. The
    available operators are SUM, MAX, MIN, COUNT, AVG, VAR, STDV, CONCAT,
    SELECT_ONE, ARGMIN, ARGMAX, and QUANTILE. For convenience, aggregators
    MEAN, STD, and VARIANCE are available as synonyms for AVG, STDV, and
    VAR. See :mod:`~turicreate.aggregate` for more detail on the
    aggregators.

    Parameters
    ----------
    key_column_names : string | list[string]
        Column(s) to group by. Key columns can be of any type other than
        dictionary.

    operations : dict, list
        Dictionary of columns and aggregation operations. Each key is an
        output column name and each value is an aggregator. This can also
        be a list of aggregators, in which case column names will be
        automatically assigned.

    *args
        All other remaining arguments will be interpreted in the same way
        as the operations argument.

    Returns
    -------
    out_sf : SFrame
        A new SFrame, with a column for each groupby column and each
        aggregation operation.

    See Also
    --------
    aggregate

    Notes
    -----
    * Numeric aggregators (such as sum, mean, stdev etc.) follow the skip
      None policy, i.e. they omit all missing values from the aggregation.
      For example, ``sum([None, 5, 10]) = 15`` because the ``None`` value
      is skipped.
    * Aggregators have a default value when no values (after skipping all
      ``None`` values) are present: ``None`` for ['ARGMAX', 'ARGMIN',
      'AVG', 'STD', 'MEAN', 'MIN', 'MAX'], ``0`` for ['COUNT',
      'COUNT_DISTINCT', 'DISTINCT'], ``[]`` for 'CONCAT', 'QUANTILE',
      'DISTINCT', and ``{}`` for 'FREQ_COUNT'.

    Examples
    --------
    Suppose we have an SFrame with movie ratings by many users.

    >>> import turicreate.aggregate as agg
    >>> url = 'https://static.turi.com/datasets/rating_data_example.csv'
    >>> sf = turicreate.SFrame.read_csv(url)

    Compute the number of occurrences of each user:

    >>> user_count = sf.groupby(key_column_names='user_id',
    ...                         operations={'count': agg.COUNT()})

    Compute the mean and standard deviation of ratings per user:

    >>> user_rating_stats = sf.groupby(key_column_names='user_id',
    ...                                operations={
    ...                                    'mean_rating': agg.MEAN('rating'),
    ...                                    'std_rating': agg.STD('rating')})

    Compute the movie with the minimum rating per user:

    >>> chosen_movies = sf.groupby(key_column_names='user_id',
    ...                            operations={
    ...                                'worst_movies': agg.ARGMIN('rating', 'movie_id')})

    Multi-output ARGMAX over several ranking columns at once:

    >>> sf['imdb-ranking'] = sf['rating'] * 10
    >>> chosen_movies = sf.groupby(
    ...     key_column_names='user_id',
    ...     operations={('max_rating_movie', 'max_imdb_ranking_movie'):
    ...                 agg.ARGMAX(('rating', 'imdb-ranking'), 'movie_id')})

    Automatically assigned output column names:

    >>> sf['time'] = sf.apply(lambda x: (x['user_id'] + x['movie_id']) % 11 + 2000)
    >>> user_rating_stats = sf.groupby(['user_id', 'time'],
    ...                                [agg.COUNT(),
    ...                                 agg.AVG('rating'),
    ...                                 agg.STDV('rating')])

    The groupby function can take a variable length list of aggregation
    specifiers:

    >>> user_rating_stats = sf.groupby(['user_id', 'time'], agg.COUNT(),
    ...                                {'rating_quantiles':
    ...                                 agg.QUANTILE('rating', [0.25, 0.75])})

    To put all items a user rated into one list value by their star rating:

    >>> user_rating_stats = sf.groupby(["user_id", "rating"],
    ...                                {"rated_movie_ids": agg.CONCAT("movie_id")})

    To put all items and rating of a given user together into a dictionary
    value:

    >>> user_rating_stats = sf.groupby("user_id",
    ...                                {"movie_rating": agg.CONCAT("movie_id", "rating")})
    """
    # some basic checking first
    # make sure key_column_names is a list
    if isinstance(key_column_names, str):
        key_column_names = [key_column_names]
    # check that every column is a string, and is a valid column name
    my_column_names = self.column_names()
    key_columns_array = []
    for column in key_column_names:
        if not isinstance(column, str):
            raise TypeError("Column name must be a string")
        if column not in my_column_names:
            raise KeyError("Column " + column + " does not exist in SFrame")
        if self[column].dtype == dict:
            raise TypeError("Cannot group on a dictionary column.")
        key_columns_array.append(column)

    # These three lists run in parallel: for position i, group_ops[i] is
    # applied to group_columns[i] and written to group_output_columns[i].
    group_output_columns = []
    group_columns = []
    group_ops = []
    all_ops = [operations] + list(args)
    for op_entry in all_ops:
        # if it is not a dict, nor a list, it is just a single aggregator
        # element (probably COUNT). wrap it in a list so we can reuse the
        # list processing code
        operation = op_entry
        if not (isinstance(operation, list) or isinstance(operation, dict)):
            operation = [operation]
        if isinstance(operation, dict):
            # now sweep the dict and add to group_columns and group_ops
            for key in operation:
                val = operation[key]
                if type(val) is tuple:
                    # An aggregator builder returns an (op_name, columns) tuple.
                    (op, column) = val
                    # Vector-typed columns get the vectorized variants of AVG/SUM.
                    if (op == '__builtin__avg__' and self[column[0]].dtype in [array.array, numpy.ndarray]):
                        op = '__builtin__vector__avg__'
                    if (op == '__builtin__sum__' and self[column[0]].dtype in [array.array, numpy.ndarray]):
                        op = '__builtin__vector__sum__'
                    # Multi-column ARGMAX/ARGMIN: inputs and outputs must both
                    # be tuples, or both plain strings.
                    if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and ((type(column[0]) is tuple) != (type(key) is tuple)):
                        raise TypeError("Output column(s) and aggregate column(s) for aggregate operation should be either all tuple or all string.")
                    if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
                        # Expand the tuple form into one op per (input, output) pair.
                        for (col, output) in zip(column[0], key):
                            group_columns = group_columns + [[col, column[1]]]
                            group_ops = group_ops + [op]
                            group_output_columns = group_output_columns + [output]
                    else:
                        group_columns = group_columns + [column]
                        group_ops = group_ops + [op]
                        group_output_columns = group_output_columns + [key]
                    if (op == '__builtin__concat__dict__'):
                        # CONCAT into a dict requires a hashable scalar key column.
                        key_column = column[0]
                        key_column_type = self.select_column(key_column).dtype
                        if not key_column_type in (int, float, str):
                            raise TypeError('CONCAT key column must be int, float or str type')
                elif val == aggregate.COUNT:
                    # User passed the COUNT class itself instead of COUNT().
                    group_output_columns = group_output_columns + [key]
                    val = aggregate.COUNT()
                    (op, column) = val
                    group_columns = group_columns + [column]
                    group_ops = group_ops + [op]
                else:
                    raise TypeError("Unexpected type in aggregator definition of output column: " + key)
        elif isinstance(operation, list):
            # we will be using automatically defined column names, signalled
            # to the backend by the empty-string output name.
            for val in operation:
                if type(val) is tuple:
                    (op, column) = val
                    if (op == '__builtin__avg__' and self[column[0]].dtype in [array.array, numpy.ndarray]):
                        op = '__builtin__vector__avg__'
                    if (op == '__builtin__sum__' and self[column[0]].dtype in [array.array, numpy.ndarray]):
                        op = '__builtin__vector__sum__'
                    if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple:
                        for col in column[0]:
                            group_columns = group_columns + [[col, column[1]]]
                            group_ops = group_ops + [op]
                            group_output_columns = group_output_columns + [""]
                    else:
                        group_columns = group_columns + [column]
                        group_ops = group_ops + [op]
                        group_output_columns = group_output_columns + [""]
                    if (op == '__builtin__concat__dict__'):
                        key_column = column[0]
                        key_column_type = self.select_column(key_column).dtype
                        if not key_column_type in (int, float, str):
                            raise TypeError('CONCAT key column must be int, float or str type')
                elif val == aggregate.COUNT:
                    group_output_columns = group_output_columns + [""]
                    val = aggregate.COUNT()
                    (op, column) = val
                    group_columns = group_columns + [column]
                    group_ops = group_ops + [op]
                else:
                    raise TypeError("Unexpected type in aggregator definition.")
    # let's validate group_columns and group_ops are valid
    for (cols, op) in zip(group_columns, group_ops):
        for col in cols:
            if not isinstance(col, str):
                raise TypeError("Column name must be a string")
        if not isinstance(op, str):
            raise TypeError("Operation type not recognized.")
        # NOTE(review): `is not` compares string identity here; presumably it
        # relies on the COUNT op string being the same interned object —
        # `!=` would be the safer comparison. TODO confirm before changing.
        if op is not aggregate.COUNT()[0]:
            for col in cols:
                if col not in my_column_names:
                    raise KeyError("Column " + col + " does not exist in SFrame")
    with cython_context():
        return SFrame(_proxy=self.__proxy__.groupby_aggregate(key_columns_array, group_columns, group_output_columns, group_ops))
def get_plugins_info(self):
    """Collect the current live info from all the registered plugins.

    Returns:
        dict: the merged info of every plugin, keyed on the plugin name;
        on a key collision, the plugin registered later wins.
    """
    merged = {}
    for plugin in self.plugins:
        merged.update(plugin.get_info())
    return merged
def body_block_attribution(tag):
    "extract the attribution content for figures, tables, videos"
    collected = []
    attrib_tags = raw_parser.attrib(tag)
    if attrib_tags:
        for a_tag in attrib_tags:
            collected.append(node_contents_str(a_tag))
    permissions_tags = raw_parser.permissions(tag)
    if permissions_tags:
        # concatenate content from the permissions tag
        for permissions_tag in permissions_tags:
            # start with the copyright statement if found
            text = join_sentences(
                '', node_contents_str(raw_parser.copyright_statement(permissions_tag)), '.')
            # then append each of the license paragraphs
            licence_p_tags = raw_parser.licence_p(permissions_tag)
            if licence_p_tags:
                for licence_p_tag in licence_p_tags:
                    text = join_sentences(text, node_contents_str(licence_p_tag), '.')
            if text != '':
                collected.append(text)
    return collected
def mkdir_p(self, mode=0o777):
    """Like :meth:`mkdir`, but does not raise an exception if the
    directory already exists.

    Returns self to allow chaining.
    """
    try:
        self.mkdir(mode)
    except FileExistsError:
        # Directory already present — that is exactly what we wanted.
        pass
    return self
def clean_url(url, force_scheme=None):
    """Cleans the given URL."""
    # URL should be ASCII according to RFC 3986
    url = str(url)
    parts = urlparse.urlparse(url)
    # Collapse '.' and '..' path segments (relative-path resolution).
    resolved = []
    for segment in parts.path.split('/'):
        if segment == '.':
            continue
        if segment == '..':
            if resolved:
                resolved.pop()
        else:
            resolved.append(segment)
    parts = list(parts)
    if force_scheme:
        parts[0] = force_scheme
    # Rebuild the path; an empty path always becomes '/'.
    parts[2] = '/'.join(resolved)
    if FLAGS.keep_query_string == False:
        parts[4] = ''  # drop the query string
    parts[5] = ''  # always drop the fragment
    if not parts[2]:
        parts[2] = '/'
    return urlparse.urlunparse(parts)
def get_object_metadata(self, container, obj, prefix=None):
    """Returns the metadata for the specified object as a dict.

    Delegates directly to the underlying storage manager.
    """
    manager = self._manager
    return manager.get_object_metadata(container, obj, prefix=prefix)
def zipLists(*lists):
    """Checks to see if all of the lists are the same length, and throws
    an AssertionError otherwise. Returns the zipped lists.

    Parameters
    ----------
    *lists : sized iterables
        Zero or more sequences; all must have the same length.

    Returns
    -------
    zip
        Iterator of tuples pairing the lists element-wise. With no
        arguments, an empty iterator (the original raised IndexError).

    Raises
    ------
    AssertionError
        If any list's length differs from the first list's length.
    """
    # Fix: the original indexed lists[0] unconditionally and crashed with
    # IndexError when called with no arguments.
    if not lists:
        return zip()
    length = len(lists[0])
    # start=1 keeps the reported index identical to the original's i + 1.
    for i, list_ in enumerate(lists[1:], start=1):
        if len(list_) != length:
            msg = "List at index {} has length {} != {}".format(i, len(list_), length)
            raise AssertionError(msg)
    return zip(*lists)
def cposr(string, chars, start):
    """Find the first occurrence in a string of a character belonging
    to a collection of characters, starting at a specified location,
    searching in reverse.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/cposr_c.html

    :param string: Any character string.
    :type string: str
    :param chars: A collection of characters.
    :type chars: str
    :param start: Position to begin looking for one of chars.
    :type start: int
    :return: The index of the last character of str at or before index
        start that is in the collection chars.
    :rtype: int
    """
    # Convert the Python arguments to ctypes and call straight into CSPICE.
    return libspice.cposr_c(
        stypes.stringToCharP(string),
        stypes.stringToCharP(chars),
        ctypes.c_int(start),
    )
def get_ancestors(self, include_self=False, depth=None):
    """Return all the ancestors of this object.

    :param include_self: also include this object in the result set.
    :param depth: if given, limit the result to ancestors at most this
        many levels up the closure tree.
    """
    parentref = self._closure_parentref()
    if self.is_root_node():
        if include_self:
            # Filter on pk for efficiency.
            return self._toplevel().objects.filter(pk=self.pk)
        return self._toplevel().objects.none()
    lookup = {"%s__child" % parentref: self.pk}
    if depth is not None:
        lookup["%s__depth__lte" % parentref] = depth
    ancestors = self._toplevel().objects.filter(**lookup)
    if not include_self:
        ancestors = ancestors.exclude(pk=self.pk)
    return ancestors.order_by("%s__depth" % parentref)
def write_to_file(data, path):
    """Export extracted fields to json.

    Appends .json to path if missing and generates the json file in the
    specified directory, if not then in root.

    Parameters
    ----------
    data : list[dict]
        Extracted fields, one dict per invoice; each dict's 'date' value
        is a datetime object. NOTE: the 'date' values are converted to
        strings in place, mutating the caller's dicts.
    path : str
        File path for the generated json file.

    Notes
    -----
    Do give a file name to the function parameter path.

    Examples
    --------
    >>> from invoice2data.output import to_json
    >>> to_json.write_to_file(data, "/exported_json/invoice.json")
    >>> to_json.write_to_file(data, "invoice.json")
    """
    # Append .json if the caller did not include the extension.
    if path.endswith('.json'):
        filename = path
    else:
        filename = path + '.json'
    with codecs.open(filename, "w", encoding='utf-8') as json_file:
        for line in data:
            # Serialize the datetime field as dd/mm/YYYY.
            line['date'] = line['date'].strftime('%d/%m/%Y')
        # Fix: removed leftover debug statements (print(type(json)),
        # print(json)) that spammed stdout on every export.
        json.dump(data, json_file, indent=4, sort_keys=True,
                  default=myconverter, ensure_ascii=False)
def format_field(self, value, format_spec):
    """Format specifiers are described in :func:`format_field` which is a
    static function.

    Splits format_spec into a single-character spec and an optional
    argument (the remainder), then delegates to ``_format_field``.
    """
    spec = arg = None
    if format_spec:
        spec = format_spec[0]
        # Empty remainder is normalized to None.
        arg = format_spec[1:] or None
    return self._format_field(spec, arg, value, self.numeric_locale)
def configure(self, ext):
    """Configures the given Extension object using this build configuration.

    Appends this configuration's include dirs, library dirs, libraries and
    extra compile/link settings onto the extension's corresponding lists.
    """
    for name in ('include_dirs', 'library_dirs', 'libraries',
                 'extra_compile_args', 'extra_link_args', 'extra_objects'):
        target = getattr(ext, name)
        # Same effect as `ext.<name> += self.<name>` in the original:
        # in-place extend for lists, rebind for immutable sequences.
        target += getattr(self, name)
        setattr(ext, name, target)
def _FormatHostname ( self , event ) :
"""Formats the hostname .
Args :
event ( EventObject ) : event .
Returns :
str : formatted hostname field .""" | hostname = self . _output_mediator . GetHostname ( event )
return self . _FormatField ( hostname ) |
def _set_slot(self, v, load=False):
    """Setter method for slot, mapped from YANG variable /qos/cpu/slot (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_slot is considered as a private method. Backends looking to
    populate this variable should do so via calling thisObj._set_slot()
    directly.

    NOTE: auto-generated pyangbind code — do not hand-edit the YANGDynClass
    arguments; they mirror the YANG model.
    """
    # If v carries its own conversion type, normalize it first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Wrap the value in the generated YANG list type; raises on any
        # value that cannot be coerced to the modeled list.
        t = YANGDynClass(v,base=YANGListType("slot_id",slot.slot, yang_name="slot", rest_name="slot", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot-id', extensions={u'tailf-common': {u'info': u'Configure CPU QoS on slot', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'QosCpuPortConfig'}}), is_container='list', yang_name="slot", rest_name="slot", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CPU QoS on slot', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'QosCpuPortConfig'}}, namespace='urn:brocade.com:mgmt:brocade-qos-cpu', defining_module='brocade-qos-cpu', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with the generated, descriptive error payload.
        raise ValueError({'error-string': """slot must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("slot_id",slot.slot, yang_name="slot", rest_name="slot", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='slot-id', extensions={u'tailf-common': {u'info': u'Configure CPU QoS on slot', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'QosCpuPortConfig'}}), is_container='list', yang_name="slot", rest_name="slot", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure CPU QoS on slot', u'cli-suppress-mode': None, u'cli-incomplete-no': None, u'cli-sequence-commands': None, u'cli-incomplete-command': None, u'callpoint': u'QosCpuPortConfig'}}, namespace='urn:brocade.com:mgmt:brocade-qos-cpu', defining_module='brocade-qos-cpu', yang_type='list', is_config=True)""",})
    self.__slot = t
    # Notify the parent object graph that this leaf changed, if supported.
    if hasattr(self, '_set'):
        self._set()
def is_rpm(path):
    """Attempt to validate the path as an actual (S)RPM.

    Uses libmagic to inspect the file's MIME description; rpm files and
    directories are accepted.

    Parameters
    ----------
    path : str
        Path of the file to inspect.

    Returns
    -------
    bool
        True for rpm files and directories, False otherwise (logged).
    """
    import magic
    m = magic.open(magic.MAGIC_MIME)
    m.load()
    try:
        mime = m.file(path)
    finally:
        # Fix: the libmagic cookie was never released, leaking the handle
        # on every call.
        m.close()
    # rpms or directories are cool
    if 'rpm' in mime or 'directory' in mime:
        return True
    juicer.utils.Log.log_info("error: File `%s` is not an rpm" % path)
    return False
def update(self, instance, validated_data):
    '''We want to set all the required fields if admin is set, and we want
    to use the password hashing method if password is set.'''
    admin_flag = validated_data.pop('is_superuser', None)
    new_password = validated_data.pop('password', None)
    email = validated_data.get('email')
    if email is not None:
        # Keep username in lockstep with email.
        validated_data['username'] = email
    for field, field_value in validated_data.items():
        setattr(instance, field, field_value)
    if admin_flag is not None:
        # Admin status drives both staff and superuser flags.
        instance.is_staff = admin_flag
        instance.is_superuser = admin_flag
    if new_password is not None:
        # Route through set_password so the value is hashed, never stored raw.
        instance.set_password(new_password)
    instance.save()
    return instance
def bernard_auth(func):
    """Authenticates the users based on the query-string-provided token.

    Wraps an aiohttp handler; the token is read from the configured header
    first, then from the query string. On failure a 401 JSON response is
    returned; on success the wrapped handler is called with the resolved
    message and platform.
    """
    @wraps(func)
    async def wrapper(request: Request):
        header_token = request.headers.get(settings.WEBVIEW_HEADER_NAME, '')
        query_token = request.query.get(settings.WEBVIEW_TOKEN_KEY, '')
        # Header token takes precedence over the query-string token.
        token = header_token or query_token or ''
        try:
            body = await request.json()
        except ValueError:
            # Request had no (valid) JSON body.
            body = None
        msg, platform = await manager.message_from_token(token, body)
        if not msg:
            return json_response(
                {'status': 'unauthorized', 'message': 'No valid token found', },
                status=401,
            )
        return await func(msg, platform)
    return wrapper
from typing import List
def parse_music(music_string: str) -> List[int]:
    """Input to this function is a string representing musical notes in a
    special ASCII format. Parse this string and return a list of integers
    corresponding to how many beats each note lasts.

    Legend:
        'o'  - whole note, lasts four beats
        'o|' - half note, lasts two beats
        '.|' - quarter note, lasts one beat

    >>> parse_music('o o| .| o| o| .| .| .| .| o o')
    [4, 2, 1, 2, 2, 1, 1, 1, 1, 4, 4]
    """
    # Beat duration for each supported note symbol.
    duration = {'o': 4, 'o|': 2, '.|': 1}
    return [duration[symbol] for symbol in music_string.split()]
def is_ip_addr_list(value, min=None, max=None):
    """Check that the value is a list of IP addresses.

    You can optionally specify the minimum and maximum number of members.
    Each list member is checked that it is an IP address.

    >>> vtor = Validator()
    >>> vtor.check('ip_addr_list', ())
    []
    >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
    ['1.2.3.4', '5.6.7.8']
    >>> vtor.check('ip_addr_list', ['a'])  # doctest: +SKIP
    Traceback (most recent call last):
    VdtValueError: the value "a" is unacceptable.
    """
    # First validate the container itself, then each member as an address.
    members = is_list(value, min, max)
    return [is_ip_addr(member) for member in members]
def get_acgt_geno_marker(self, marker):
    """Gets the genotypes for a given marker (ACGT format).

    Args:
        marker (str): The name of the marker.

    Returns:
        numpy.ndarray: The genotypes of the marker (ACGT format).
    """
    # Fetch the raw genotypes along with the marker's position index.
    genotypes, marker_index = self.get_geno_marker(marker, return_index=True)
    # Translate through the per-marker allele encoding table.
    encoding = self._allele_encoding[marker_index]
    return encoding[genotypes]
def _add_data_to_general_stats(self, data):
    """Add data for the general stats in a Picard-module specific manner."""
    headers = _get_general_stats_headers()
    self.general_stats_headers.update(headers)
    wanted_columns = ('ERROR_count', 'WARNING_count', 'file_validation_status')
    for sample, sample_data in data.items():
        row = {column: sample_data[column] for column in wanted_columns}
        if sample not in self.general_stats_data:
            self.general_stats_data[sample] = dict()
        if sample_data['file_validation_status'] != 'pass':
            # Unhide the status column as soon as any sample fails validation.
            headers['file_validation_status']['hidden'] = False
        self.general_stats_data[sample].update(row)
def get_posts(self, offset=0, limit=1000, order=None, filters=None):
    """Return a list of Posts for this Data Source, starting at a given
    offset and containing not more than limit items.

    Calls content-specific methods:
        _format() to format output from the DataStore

    Parameters
    ----------
    offset : int
        Index of the first post to return.
    limit : int
        Maximum number of posts to return.
    order : optional
        Ordering spec, normalized via self._get_order().
    filters : optional
        Filter spec forwarded to self._get_query().
    """
    order = self._get_order(order)
    cache_key = self.get_cache_key(offset, limit, order, filters)
    # NOTE(review): the cached value is fetched but deliberately not
    # returned yet ("query the database for now and update later" in the
    # original) — kept so any cache-side effects are preserved.
    cache.get(cache_key)
    # Fix: removed a no-op `try/except: raise` wrapper, an unreachable
    # commented-out cache return and a stray `pass` from the original.
    if not self.up_to_date():
        self.update()
    query = self._get_query(order=order, filters=filters)
    posts = query[int(offset):int(offset) + int(limit)]
    posts = self._format(posts)
    # Cache empty results only briefly (1s) so they are retried soon.
    cache_duration = conf.GOSCALE_CACHE_DURATION if posts else 1
    cache.set(cache_key, posts, cache_duration)
    return posts
def parse_compound_table_file(path, f):
    """Parse a tab-separated file containing compound IDs and properties.

    The compound properties are parsed according to the header which
    specifies which property is contained in each column. Yields one
    CompoundEntry per data row.
    """
    context = FilePathContext(path)
    reader = csv.DictReader(f, delimiter=str('\t'))
    for line_index, row in enumerate(reader):
        if 'id' not in row or row['id'].strip() == '':
            raise ParseError('Expected `id` column in table')
        # Drop empty cells so only populated properties are kept.
        properties = {key: value for key, value in iteritems(row) if value != ''}
        if 'charge' in properties:
            properties['charge'] = int(properties['charge'])
        # +2: one for the header row, one to convert to 1-based numbering.
        mark = FileMark(context, line_index + 2, None)
        yield CompoundEntry(properties, mark)
def lerp(vec1, vec2, time):
    """Lerp between vec1 to vec2 based on time. Time is clamped between 0 and 1."""
    if not (isinstance(vec1, Vector2) and isinstance(vec2, Vector2)):
        raise TypeError("Objects must be of type Vector2")
    # Clamp the time value into the 0-1 range.
    t = min(max(time, 0), 1)
    # Component-wise linear interpolation.
    return Vector2(
        vec1[0] + t * (vec2[0] - vec1[0]),
        vec1[1] + t * (vec2[1] - vec1[1]),
    )
def write_offsets_to_file(cls, json_file_name, consumer_offsets_data):
    """Save built consumer-offsets data to given json file.

    Parameters
    ----------
    json_file_name : str
        Path of the json file to write.
    consumer_offsets_data : dict
        Consumer-offsets data to serialize.

    Raises
    ------
    TypeError, ValueError
        Re-raised from json.dump after logging, when the data is not
        serializable (TypeError) or contains a circular reference
        (ValueError).
    """
    # Save consumer-offsets to file
    with open(json_file_name, "w") as json_file:
        try:
            json.dump(consumer_offsets_data, json_file)
        except (TypeError, ValueError):
            # Fix: json.dump raises TypeError for unserializable objects
            # (ValueError only for circular references); the original only
            # caught ValueError, so the common failure escaped unlogged.
            print("Error: Invalid json data {data}".format(data=consumer_offsets_data))
            raise
    print("Consumer offset data saved in json-file {file}".format(file=json_file_name))
def last_job_statuses(self) -> List[str]:
    """The last constants of the job in this experiment.

    Returns the non-None status values of all jobs in this experiment.
    """
    raw_statuses = self.jobs.values_list('status__status', flat=True)
    return [status for status in raw_statuses if status is not None]
def overlay_gateway_access_lists_ipv4_out_ipv4_acl_out_name(self, **kwargs):
    """Auto Generated Code

    Builds the netconf config element for
    overlay-gateway/access-lists/ipv4/out/ipv4-acl-out-name and passes it
    to the callback (defaults to self._callback).
    """
    config = ET.Element("config")
    gateway = ET.SubElement(config, "overlay-gateway",
                            xmlns="urn:brocade.com:mgmt:brocade-tunnels")
    # The gateway is keyed by its name.
    ET.SubElement(gateway, "name").text = kwargs.pop('name')
    access_lists = ET.SubElement(gateway, "access-lists")
    out_element = ET.SubElement(ET.SubElement(access_lists, "ipv4"), "out")
    ET.SubElement(out_element, "ipv4-acl-out-name").text = kwargs.pop('ipv4_acl_out_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def get_freesasa_annotations(self, outdir, include_hetatms=False, force_rerun=False):
    """Run ``freesasa`` on this structure and store the calculated properties
    in the corresponding ChainProps.

    Args:
        outdir (str): Directory in which the freesasa output file is written.
        include_hetatms (bool): If True, HETATM records are included in the
            calculation and results are stored under '..._het' keys;
            otherwise under '..._nohet' keys.
        force_rerun (bool): If True, rerun freesasa even if output exists.

    Returns:
        None. Results are stored in each chain's
        ``seq_record.letter_annotations``; returns early (logging an error)
        if the file type is not 'pdb' or the structure cannot be parsed.
    """
    if self.file_type != 'pdb':
        log.error('{}: unable to run freesasa with "{}" file type. Please change file type to "pdb"'.format(self.id, self.file_type))
        return
    # Parse the structure to store chain sequences
    if self.structure:
        parsed = self.structure
    else:
        parsed = self.parse_structure()
    if not parsed:
        log.error('{}: unable to open structure to run freesasa'.format(self.id))
        return
    # Set outfile name
    log.debug('{}: running freesasa'.format(self.id))
    if include_hetatms:
        outfile = '{}.freesasa_het.rsa'.format(self.id)
    else:
        outfile = '{}.freesasa_nohet.rsa'.format(self.id)
    # Run freesasa
    result = fs.run_freesasa(infile=self.structure_path, outfile=outfile, include_hetatms=include_hetatms, outdir=outdir, force_rerun=force_rerun)
    # Parse results
    result_parsed = fs.parse_rsa_data(result)
    # Group the per-residue values by chain id, then by property name.
    # NOTE(review): keys of result_parsed appear to be tuples whose first
    # element is the chain id — confirm against fs.parse_rsa_data.
    prop_dict = defaultdict(lambda: defaultdict(list))
    for k, v in result_parsed.items():
        chain = k[0]
        for prop, calc in v.items():
            prop_dict[chain][prop].append(calc)
    # Reorganize and store results
    all_props = ['all_atoms_abs', 'all_atoms_rel', 'side_chain_abs', 'side_chain_rel', 'main_chain_abs', 'main_chain_rel', 'non_polar_abs', 'non_polar_rel', 'all_polar_abs', 'all_polar_rel']
    all_props_renamed = {'all_atoms_abs': 'ASA_ALL-freesasa', 'all_atoms_rel': 'RSA_ALL-freesasa', 'all_polar_abs': 'ASA_POLAR-freesasa', 'all_polar_rel': 'RSA_POLAR-freesasa', 'main_chain_abs': 'ASA_BACKBONE-freesasa', 'main_chain_rel': 'RSA_BACKBONE-freesasa', 'non_polar_abs': 'ASA_NONPOLAR-freesasa', 'non_polar_rel': 'RSA_NONPOLAR-freesasa', 'side_chain_abs': 'ASA_RESIDUE-freesasa', 'side_chain_rel': 'RSA_RESIDUE-freesasa'}
    # # Rename dictionary keys based on if HETATMs were included
    if include_hetatms:
        suffix = '_het'
    else:
        suffix = '_nohet'
    # Only values are rewritten during iteration (no keys added/removed),
    # which is safe in Python 3.
    for k, v in all_props_renamed.items():
        all_props_renamed[k] = v + suffix
    for chain in self.chains:
        for prop in all_props:
            # Align the computed values with the chain's sequence, padding
            # missing positions with Inf.
            prop_list = ssbio.protein.structure.properties.residues.match_structure_sequence(orig_seq=chain.seq_record, new_seq=prop_dict[chain.id][prop], fill_with=float('Inf'), ignore_excess=True)
            chain.seq_record.letter_annotations[all_props_renamed[prop]] = prop_list
            log.debug('{}: stored freesasa calculations in chain seq_record letter_annotations'.format(chain))
async def export_chat_invite_link(self, chat_id: typing.Union[base.Integer, base.String]) -> base.String:
    """Generate a new invite link for a chat; any previously generated link is revoked.

    The bot must be an administrator in the chat and have the appropriate
    admin rights.

    Source: https://core.telegram.org/bots/api#exportchatinvitelink

    :param chat_id: Unique identifier for the target chat or username of the target channel
    :type chat_id: :obj:`typing.Union[base.Integer, base.String]`
    :return: the exported invite link
    :rtype: :obj:`base.String`
    """
    # locals() here contains only `self` and `chat_id`
    params = generate_payload(**locals())
    return await self.request(api.Methods.EXPORT_CHAT_INVITE_LINK, params)
def produce_scansion(self, stresses: list, syllables_wspaces: List[str], offset_map: Dict[int, int]) -> str:
    """Create a scansion string with stressed/unstressed marks placed at the
    positions of the original text's syllable vowels.

    :param stresses: list of stressed syllable positions
    :param syllables_wspaces: syllables with spaces escaped for punctuation or elision
    :param offset_map: syllable position -> character offset in the original line
    """
    scansion_chars = list(" " * len(string_utils.flatten(syllables_wspaces)))
    unstressed_positions = string_utils.get_unstresses(stresses, len(syllables_wspaces))
    try:
        # Place unstressed marks first, then stressed, so a stressed mark
        # wins if both map to the same offset (as in the original order).
        for positions, mark in ((unstressed_positions, self.constants.UNSTRESSED),
                                (stresses, self.constants.STRESSED)):
            for idx in positions:
                spot = offset_map.get(idx)
                if spot is not None:
                    scansion_chars[spot] = mark
    except Exception as err:
        LOG.error("problem with syllables; check syllabification {}, {}".format(syllables_wspaces, err))
    return "".join(scansion_chars)
def commandstr(command):
    """Convert command into string."""
    # Lookup table replaces the if/elif ladder; unknown commands fall
    # back to "CMD_MESSAGE_UNKNOWN" just like the original else branch.
    names = {
        CMD_MESSAGE_ERROR: "CMD_MESSAGE_ERROR",
        CMD_MESSAGE_LIST: "CMD_MESSAGE_LIST",
        CMD_MESSAGE_PASSWORD: "CMD_MESSAGE_PASSWORD",
        CMD_MESSAGE_MP3: "CMD_MESSAGE_MP3",
        CMD_MESSAGE_DELETE: "CMD_MESSAGE_DELETE",
        CMD_MESSAGE_VERSION: "CMD_MESSAGE_VERSION",
        CMD_MESSAGE_CDR_AVAILABLE: "CMD_MESSAGE_CDR_AVAILABLE",
        CMD_MESSAGE_CDR: "CMD_MESSAGE_CDR",
    }
    return names.get(command, "CMD_MESSAGE_UNKNOWN")
def format_strings(self, **kwargs):
    """Return a copy of this record with `name` run through string substitution."""
    formatted_name = util.format_string(self.name, kwargs)
    return mutablerecords.CopyRecord(self, name=formatted_name)
def _on_read_complete(self, data):
    """Data read finished: wrap the raw bytes in a request object and dispatch it."""
    incoming = self.app.request_class(self, data)
    self._handle_request(incoming)
def which(cmd):
    """Return the full path to an executable.

    Args:
        cmd (str): Executable command to search for.

    Returns:
        (str) Full path to command. None if it is not found.

    Example::

        full_path_to_python = which("python")
    """
    def _executable(candidate):
        return os.path.isfile(candidate) and os.access(candidate, os.X_OK)

    directory, _ = os.path.split(cmd)
    if directory:
        # An explicit path was given: accept it only if it is executable.
        return cmd if _executable(cmd) else None
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, cmd)
        if _executable(candidate):
            return candidate
    return None
def validate_swagger_schema(schema_dir, resource_listing):
    """Validate the structure of Swagger schemas against the spec.

    **Valid only for Swagger v1.2 spec.**

    Note: the resource_listing may not be present inside schema_dir; the path
    is passed so the validator can fetch the api-declaration files from it.

    :param resource_listing: Swagger Spec v1.2 resource listing
    :type resource_listing: dict
    :param schema_dir: A path to the Swagger spec directory
    :type schema_dir: string
    :raises: :py:class:`swagger_spec_validator.SwaggerValidationError`
    """
    schema_filepath = os.path.join(schema_dir, API_DOCS_FILENAME)
    schema_url = urlparse.urljoin('file:', pathname2url(os.path.abspath(schema_filepath)))
    swagger_spec_validator.validator12.validate_spec(resource_listing, schema_url)
def broken_faces(mesh, color=None):
    """Return the index of faces in the mesh which break its watertight status.

    Parameters
    ----------
    mesh : Trimesh object
    color : (4,) uint8, color to assign to broken faces;
            None leaves mesh colors untouched

    Returns
    -------
    broken : (n,) int, indexes of mesh.faces
    """
    # A face is "broken" when it is not adjacent to exactly 3 other faces.
    graph = nx.from_edgelist(mesh.face_adjacency)
    broken = np.array([face for face, degree in dict(graph.degree()).items()
                       if degree != 3])
    if color is not None:
        color = np.array(color)
        # Fall back to opaque red when the given color isn't RGB/RGBA.
        if color.shape not in ((3,), (4,)):
            color = [255, 0, 0, 255]
        mesh.visual.face_colors[broken] = color
    return broken
def location(value):
    """Transform an IP address into an approximate location.

    Example output:

    * Zwolle, The Netherlands
    * The Netherlands
    * None
    """
    # Try a city-level lookup first; fall back to country, then to None.
    try:
        info = geoip() and geoip().city(value)
    except Exception:
        try:
            info = geoip() and geoip().country(value)
        except Exception as err:
            warnings.warn(str(err))
            info = None
    if not info or not info['country_name']:
        return None
    if 'city' in info and info['city']:
        return '{}, {}'.format(info['city'], info['country_name'])
    return info['country_name']
async def get_attached_modules(request):
    """Return JSON describing the attached hardware modules.

    On success (including an empty "modules" list when none are detected)
    responds with status 200 and a body of the form::

        {"modules": [{"name": ..., "model": ..., "serial": ...,
                      "fwVersion": ..., "status": ..., "displayName": ...}]}
    """
    hw = hw_from_req(request)
    module_data = []
    if ff.use_protocol_api_v2():
        for mod in await hw.discover_modules():
            entry = {
                'name': mod.name(),
                'displayName': mod.display_name(),
                'port': mod.port,
                'serial': mod.device_info.get('serial'),
                'model': mod.device_info.get('model'),
                'fwVersion': mod.device_info.get('version'),
            }
            entry.update(mod.live_data)
            module_data.append(entry)
    else:
        hw.discover_modules()
        for mod in hw.attached_modules.values():
            info = mod.device_info
            # device_info may be falsy on the v1 path, hence the `and` guards
            entry = {
                'name': mod.name(),
                'displayName': mod.display_name(),
                'port': mod.port,
                'serial': info and info.get('serial'),
                'model': info and info.get('model'),
                'fwVersion': info and info.get('version'),
            }
            entry.update(mod.live_data)
            module_data.append(entry)
    return web.json_response(data={"modules": module_data}, status=200)
def check_required_keys(self, required_keys):
    '''raise InsufficientGraftMPackageException if this package does not
    conform to the standard of the given package'''
    contents = self._contents_hash
    missing = [key for key in required_keys if key not in contents]
    if missing:
        # Report the first missing key, matching the original fail-fast order.
        raise InsufficientGraftMPackageException("Package missing key %s" % missing[0])
def clear_spatial_unit_conditions(self):
    """Reset the spatial-unit conditions of this form to their default value.

    Raises NoAccess when the metadata forbids clearing.
    """
    # NOTE(review): the guard checks spatial-unit metadata for read-only but
    # *zone* metadata for required, and the default comes from
    # `_zone_conditions_metadata` — this looks copy-pasted from a
    # clear_zone_conditions stub; confirm whether spatial-unit metadata
    # was intended throughout.
    if (self.get_spatial_unit_conditions_metadata().is_read_only() or self.get_zone_conditions_metadata().is_required()):
        raise NoAccess()
    self.my_osid_object_form._my_map['spatialUnitConditions'] = self._zone_conditions_metadata['default_object_values'][0]
def get(self, name=None, plugin=None):
    """Return requested shared objects.

    :param name: Name of a requested shared object
    :type name: str or None
    :param plugin: Plugin which registered the requested shared object
    :type plugin: GwBasePattern instance or None
    """
    if plugin is None:
        # No plugin filter: return everything, or a single object by name.
        if name is None:
            return self._shared_objects
        return self._shared_objects.get(name)
    if name is None:
        # All objects registered by the given plugin.
        return {key: obj for key, obj in self._shared_objects.items()
                if obj.plugin == plugin}
    # Single object, but only if it belongs to the given plugin.
    if name in self._shared_objects and self._shared_objects[name].plugin == plugin:
        return self._shared_objects[name]
    return None
def remove_server(self, server_id):
    """Remove a registered WBEM server from the subscription manager.

    This also unregisters listeners from that server and removes all owned
    indication subscriptions, owned indication filters and owned listener
    destinations.

    Parameters:

      server_id (:term:`string`):
        The server ID of the WBEM server, returned by
        :meth:`~pywbem.WBEMSubscriptionManager.add_server`.

    Raises:

        Exceptions raised by :class:`~pywbem.WBEMConnection`.
    """
    # Validate server_id
    server = self._get_server(server_id)
    # Delete any instances we recorded to be cleaned up.  The three owned
    # collections share identical cleanup logic, so iterate them instead of
    # triplicating the loop (subscriptions, then filters, then destinations,
    # preserving the original deletion order).
    for owned in (self._owned_subscriptions, self._owned_filters,
                  self._owned_destinations):
        if server_id in owned:
            inst_list = owned[server_id]
            # Iterate backwards because we remove items from the list as we go;
            # a failed DeleteInstance leaves the remaining items recorded.
            for i in range(len(inst_list) - 1, -1, -1):
                inst = inst_list[i]
                server.conn.DeleteInstance(inst.path)
                del inst_list[i]
            del owned[server_id]
    # Remove server from this listener
    del self._servers[server_id]
def register_bec_task(self, *args, **kwargs):
    """Register a BEC task."""
    # Force the task class to BecTask, overriding any caller-supplied value.
    options = dict(kwargs, task_class=BecTask)
    return self.register_task(*args, **options)
def random(self, max_number=None):
    """Return a random integer between min and max (inclusive).

    With no argument, draws from [0, self.obj]; otherwise from
    [self.obj, max_number].
    """
    min_number = self.obj
    if max_number is None:
        min_number = 0
        max_number = self.obj
    # randint includes both endpoints, matching the documented contract;
    # the previous randrange(min, max) silently excluded max_number.
    from random import randint
    return randint(min_number, max_number)
async def _send_plain_text(self, request: Request, stack: Stack):
    """Sends plain text using `_send_text()`.

    The third positional argument (None) is `_send_text()`'s
    formatting/markup parameter — presumably "no markup"; confirm
    against the `_send_text` signature.
    """
    await self._send_text(request, stack, None)
def replace_namespaced_lease(self, name, namespace, body, **kwargs):  # noqa: E501
    """replace_namespaced_lease  # noqa: E501

    replace the specified Lease  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_namespaced_lease(name, namespace, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Lease (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param V1beta1Lease body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :return: V1beta1Lease
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async and sync paths issue the identical call; when
    # async_req is set the helper returns the request thread instead of data.
    return self.replace_namespaced_lease_with_http_info(name, namespace, body, **kwargs)  # noqa: E501
def _apply_rule(self, dates):
    """Apply this rule's offset/observance to a DatetimeIndex of dates.

    Parameters
    ----------
    dates : DatetimeIndex
        Dates to apply the given offset/observance rule to.

    Returns
    -------
    Dates with rules applied.
    """
    if self.observance is not None:
        return dates.map(lambda d: self.observance(d))
    if self.offset is not None:
        offsets = self.offset if isinstance(self.offset, list) else [self.offset]
        for off in offsets:
            # Adding a non-vectorized offset triggers PerformanceWarnings;
            # silence them, as the original did.
            with warnings.catch_warnings():
                warnings.simplefilter("ignore", PerformanceWarning)
                dates += off
    return dates
def set_comment(self, format, *args):
    """Add comment to config item before saving to disk. You can add as many
    comment lines as you like. If you use a null format, all comments are
    deleted.

    Thin ctypes wrapper: forwards `format` and `args` unchanged to the C
    function zconfig_set_comment with this node's handle — presumably a
    printf-style format consumed on the C side; confirm against the czmq API.
    """
    return lib.zconfig_set_comment(self._as_parameter_, format, *args)
def get(self, url_type):
    """Accepts either 'public' or 'private' (case-insensitive) and returns
    the corresponding value of 'public_url' or 'private_url', respectively.
    """
    key = url_type.lower()
    if key == "public":
        return self.public_url
    if key == "private":
        return self.private_url
    raise ValueError("Valid values are 'public' or 'private'; "
                     "received '%s'." % url_type)
def _make_command_filename(self, exe):
    """Build an output filename for a command, avoiding name collisions."""
    candidate = os.path.join(self.commons['cmddir'], self.name(),
                             self._mangle_command(exe))
    # If the name is taken, append the first free numeric suffix (_2, _3, ...).
    if os.path.exists(candidate):
        suffix = 2
        while os.path.exists("%s_%d" % (candidate, suffix)):
            suffix += 1
        candidate = "%s_%d" % (candidate, suffix)
    return candidate
def get_static_properties(self):
    """Return a dictionary of STATICPROPERTIES.

    Examples
    --------
    >>> reader = XBNReader('xbn_test.xml')
    >>> reader.get_static_properties()
    {'FORMAT': 'MSR DTAS XML', 'VERSION': '0.2', 'CREATOR': 'Microsoft Research DTAS'}
    """
    properties = {}
    for child in self.bnmodel.find('STATICPROPERTIES'):
        properties[child.tag] = child.get('VALUE')
    return properties
def mount(cls, mount_point, lower_dir, upper_dir, mount_table=None):
    """Execute the overlay mount. This requires root.

    Raises AlreadyMounted when mount_point is already in use.
    """
    ensure_directories(mount_point, lower_dir, upper_dir)
    # Load the mount table if one wasn't supplied.
    mount_table = mount_table or MountTable.load()
    if mount_table.is_mounted(mount_point):
        raise AlreadyMounted()
    options = "rw,lowerdir=%s,upperdir=%s" % (lower_dir, upper_dir)
    # Perform the actual mount with a randomized device name.
    subwrap.run(['mount', '-t', 'overlayfs', '-o', options,
                 'olyfs%s' % random_name(), mount_point])
    return cls(mount_point, lower_dir, upper_dir)
def next(self):
    """Return the next query result.

    :return: The next query result.
    :rtype: dict
    :raises StopIteration: If no more results are left.
    """
    if self._has_finished:
        raise StopIteration
    # Refill the buffer when empty; an empty refill means we're done.
    if not self._buffer:
        self._buffer.extend(self.fetch_next_block())
        if not self._buffer:
            raise StopIteration
    return self._buffer.popleft()
def all_partitions(mechanism, purview, node_labels=None):
    """Return all possible partitions of a mechanism and purview.

    Partitions can consist of any number of parts.

    Args:
        mechanism (tuple[int]): A mechanism.
        purview (tuple[int]): A purview.
        node_labels: optional labels forwarded to each yielded KPartition.

    Yields:
        KPartition: A partition of this mechanism and purview into ``k`` parts.
    """
    for mechanism_partition in partitions(mechanism):
        # Append an empty part so the purview can be cut away from the
        # whole mechanism.
        mechanism_partition.append([])
        n_mechanism_parts = len(mechanism_partition)
        # The purview can be split into at most as many parts as the
        # mechanism (or its own length, whichever is smaller).
        max_purview_partition = min(len(purview), n_mechanism_parts)
        for n_purview_parts in range(1, max_purview_partition + 1):
            n_empty = n_mechanism_parts - n_purview_parts
            for purview_partition in k_partitions(purview, n_purview_parts):
                purview_partition = [tuple(_list) for _list in purview_partition]
                # Extend with empty tuples so purview partition has same size
                # as mechanism purview
                purview_partition.extend([()] * n_empty)
                # Unique permutations to avoid duplicates empties
                for purview_permutation in set(permutations(purview_partition)):
                    parts = [Part(tuple(m), tuple(p)) for m, p in zip(mechanism_partition, purview_permutation)]
                    # Must partition the mechanism, unless the purview is fully
                    # cut away from the mechanism.
                    if parts[0].mechanism == mechanism and parts[0].purview:
                        continue
                    yield KPartition(*parts, node_labels=node_labels)
def start_server(broker, backend=None, port=12223, max_tasks=10000, max_workers=100, blocking=False, debug=False):  # pragma: no cover
    """Starts a Clearly Server programmatically."""
    _setup_logging(debug)
    # The listener pushes events onto this queue; the dispatcher consumes them.
    events_queue = Queue()
    listener = EventListener(broker, events_queue, backend=backend,
                             max_tasks_in_memory=max_tasks,
                             max_workers_in_memory=max_workers)
    dispatcher = StreamingDispatcher(events_queue)
    server = ClearlyServer(listener, dispatcher)
    return _serve(server, port, blocking)
def appendDatastore(self, store):
    '''Appends datastore `store` to this collection.'''
    if isinstance(store, Datastore):
        self._stores.append(store)
        return
    raise TypeError("stores must be of type %s" % Datastore)
def start(self, channel=None):
    """Start this emulated device.

    This triggers the controller to call start on all peripheral tiles in
    the device to make sure they start after the controller does and then
    it waits on each one to make sure they have finished initializing
    before returning.

    Args:
        channel (IOTilePushChannel): the channel with a stream and trace
            routine for streaming and tracing data through a VirtualInterface
    """
    # Start the base device first, then the emulator loop — ordering matters
    # so peripherals come up after the controller.
    super(EmulatedDevice, self).start(channel)
    self.emulator.start()
def listdirs(path='.'):
    """Yield the directories contained directly in *path*.

    For the default path ('.') bare directory names are yielded; for any
    other path each name is joined to *path*.
    """
    # Self-contained: the original relied on bare `isdir`/`join` names from
    # an implicit module-level import while importing `os` locally.
    import os
    for entry in os.listdir(path):
        full = os.path.join(path, entry)
        if os.path.isdir(full):
            yield entry if path == '.' else full
def encodeSequence(seq_vec, vocab, neutral_vocab, maxlen=None, seq_align="start", pad_value="N", encode_type="one_hot"):
    """Convert a list of genetic sequences into a one-hot-encoded array.

    # Arguments
        seq_vec: list of strings (genetic sequences).
        vocab: list of chars: "words" to use as the vocabulary. May be strings
            of length > 1, but all must have the same length. For DNA this is
            ["A", "C", "G", "T"].
        neutral_vocab: list of chars: values used to pad the sequence or to
            represent unknown values. For DNA this is ["N"].
        maxlen: int or None. If not None, trim/pad the resulting sequences to
            this length (with respect to `seq_align`). Should not exceed the
            longest sequence.
        seq_align: 'start' or 'end' — which end to align sequences to.
        encode_type: "one_hot" or "token". "token" represents each vocab
            element as a positive integer from 1 to len(vocab) + 1;
            neutral_vocab is represented with 0.

    # Returns
        Array with shape, per encode_type:
            - "one_hot": `(len(seq_vec), maxlen, len(vocab))`
            - "token": `(len(seq_vec), maxlen)`
        If `maxlen=None`, the longest sequence length in `seq_vec` is used.
    """
    if isinstance(neutral_vocab, str):
        neutral_vocab = [neutral_vocab]
    if isinstance(seq_vec, str):
        raise ValueError("seq_vec should be an iterable returning " + "strings not a string itself")
    # All vocab entries and the pad value must share the same token width.
    assert len(vocab[0]) == len(pad_value)
    assert pad_value in neutral_vocab
    assert encode_type in ["one_hot", "token"]
    seq_vec = pad_sequences(seq_vec, maxlen=maxlen, align=seq_align, value=pad_value)
    if encode_type == "one_hot":
        # The enumerate index was unused; iterate the sequences directly.
        arr_list = [token2one_hot(tokenize(seq, vocab, neutral_vocab), len(vocab)) for seq in seq_vec]
    elif encode_type == "token":
        # +1 so that 0 can serve as the padding index, compatible with
        # keras embeddings: https://keras.io/layers/embeddings/
        arr_list = [1 + np.array(tokenize(seq, vocab, neutral_vocab)) for seq in seq_vec]
    return np.stack(arr_list)
def poke_native(getstate):
    """Serializer factory for types whose state can be natively serialized.

    Arguments:
        getstate (callable): takes an object and returns the object's state
            to be passed to `pokeNative`.

    Returns:
        callable: serializer (`poke` routine).
    """
    def poke(service, objname, obj, container, visited=None, _stack=None):
        # Extract the object's state and hand it to the service's native
        # serializer.
        state = getstate(obj)
        service.pokeNative(objname, state, container)
    return poke
def remove(name=None, pkgs=None, jail=None, chroot=None, root=None, all_installed=False, force=False, glob=False, dryrun=False, recurse=False, regex=False, pcre=False, **kwargs):
    '''Remove a package from the database and system

    .. note::

        This function can accessed using ``pkg.delete`` in addition to
        ``pkg.remove``, to more closely match the CLI usage of ``pkg(8)``.

    name
        The package to remove

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.remove <package name>

    jail
        Delete the package from the specified jail

    chroot
        Delete the package from the specified chroot (ignored if ``jail`` is
        specified)

    root
        Delete the package from the specified root (ignored if ``jail`` is
        specified)

    all_installed
        Deletes all installed packages from the system and empties the
        database. USE WITH CAUTION!

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.remove all all_installed=True force=True

    force
        Forces packages to be removed despite leaving unresolved
        dependencies.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.remove <package name> force=True

    glob
        Treat the package names as shell glob patterns.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.remove <package name> glob=True

    dryrun
        Dry run mode. The list of packages to delete is always printed, but
        no packages are actually deleted.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.remove <package name> dryrun=True

    recurse
        Delete all packages that require the listed package as well.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.remove <package name> recurse=True

    regex
        Treat the package names as regular expressions.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.remove <regular expression> regex=True

    pcre
        Treat the package names as extended regular expressions.

        CLI Example:

        .. code-block:: bash

            salt '*' pkg.remove <extended regular expression> pcre=True
    '''
    del kwargs  # Unused parameter
    try:
        pkg_params = __salt__['pkg_resource.parse_targets'](name, pkgs)[0]
    except MinionError as exc:
        raise CommandExecutionError(exc)
    targets = []
    old = list_pkgs(jail=jail, chroot=chroot, root=root, with_origin=True)
    for pkgname in pkg_params:
        # FreeBSD pkg supports `openjdk` and `java/openjdk7` package names
        if pkgname.find("/") > 0:
            # Resolve an origin (category/name) to the installed package name.
            origin = pkgname
            matches = [k for k, v in six.iteritems(old) if v['origin'] == origin]
            if not matches:
                # Unknown origin: nothing installed matches, skip it.
                # (Previously this raised IndexError on an empty match list,
                # and then tested only the first *character* of the name.)
                continue
            pkgname = matches[0]
        if pkgname in old:
            targets.append(pkgname)
    if not targets:
        return {}
    opts = ''
    if salt.utils.data.is_true(all_installed):
        opts += 'a'
    if salt.utils.data.is_true(force):
        opts += 'f'
    if salt.utils.data.is_true(glob):
        opts += 'g'
    if salt.utils.data.is_true(dryrun):
        opts += 'n'
    if not salt.utils.data.is_true(dryrun):
        opts += 'y'
    if salt.utils.data.is_true(recurse):
        opts += 'R'
    if salt.utils.data.is_true(regex):
        opts += 'x'
    if salt.utils.data.is_true(pcre):
        opts += 'X'
    cmd = _pkg(jail, chroot, root)
    cmd.append('delete')
    if opts:
        cmd.append('-' + opts)
    cmd.extend(targets)
    out = __salt__['cmd.run_all'](cmd, output_loglevel='trace', python_shell=False)
    if out['retcode'] != 0 and out['stderr']:
        errors = [out['stderr']]
    else:
        errors = []
    __context__.pop(_contextkey(jail, chroot, root), None)
    __context__.pop(_contextkey(jail, chroot, root, prefix='pkg.origin'), None)
    new = list_pkgs(jail=jail, chroot=chroot, root=root, with_origin=True)
    ret = salt.utils.data.compare_dicts(old, new)
    if errors:
        raise CommandExecutionError('Problem encountered removing package(s)', info={'errors': errors, 'changes': ret})
    return ret
def install(self, apk_path, destination_dir=None, timeout_ms=None):
    """Install apk to device.

    Doesn't support a verifier file; instead allows the destination
    directory to be overridden.

    Arguments:
        apk_path: Local path to apk to install.
        destination_dir: Optional destination directory. Use /system/app/ for
            persistent applications.
        timeout_ms: Expected timeout for pushing and installing.

    Returns:
        The pm install output.
    """
    import posixpath
    if not destination_dir:
        destination_dir = '/data/local/tmp/'
    basename = os.path.basename(apk_path)
    # posixpath.join builds a correct device path whether or not
    # destination_dir ends with a slash (plain concatenation did not).
    destination_path = posixpath.join(destination_dir, basename)
    self.push(apk_path, destination_path, timeout_ms=timeout_ms)
    return self.Shell('pm install -r "%s"' % destination_path, timeout_ms=timeout_ms)
def pyeapi_call(method, *args, **kwargs):
    '''.. versionadded:: 2019.2.0

    Invoke an arbitrary method from the ``pyeapi`` library.

    This function forwards the existing connection details to the
    :mod:`pyeapi.run_commands <salt.module.arista_pyeapi.run_commands>`
    execution function.

    method
        The name of the ``pyeapi`` method to invoke.

    kwargs
        Key-value arguments to send to the ``pyeapi`` method.

    CLI Example:

    .. code-block:: bash

        salt '*' napalm.pyeapi_call run_commands 'show version' encoding=text
        salt '*' napalm.pyeapi_call get_config as_string=True
    '''
    # Merge the NAPALM connection details into the kwargs and delegate.
    return __salt__['pyeapi.call'](method, *args, **pyeapi_nxos_api_args(**kwargs))
def create_log2fc_bigwigs(matrix, outdir, args):
    # type: (pd.DataFrame, str, Namespace) -> None
    """Create log2 fold-change bigwigs from matrix, one per treatment bed file."""
    import os
    # os.makedirs replaces the previous `call("mkdir -p ...", shell=True)`:
    # no shell subprocess, and no injection surface via `outdir`.
    os.makedirs(outdir, exist_ok=True)
    genome_size_dict = args.chromosome_sizes
    # One output path per treatment bed file, suffixed "_log2fc.bw".
    outpaths = []
    for bed_file in matrix[args.treatment]:
        outpath = join(outdir, splitext(basename(bed_file))[0] + "_log2fc.bw")
        outpaths.append(outpath)
    data = create_log2fc_data(matrix, args)
    Parallel(n_jobs=args.number_cores)(delayed(_create_bigwig)(bed_column, outpath, genome_size_dict) for outpath, bed_column in zip(outpaths, data))
def advise(self, item, stop=False):
    """Request updates when DDE data changes.

    :param item: DDE item name to (un)subscribe advise notifications for.
    :param stop: when True, stop an existing advise loop instead of
        starting one.
    :raises DDEError: if the underlying DDE transaction fails.
    """
    hszItem = DDE.CreateStringHandle(self._idInst, item, CP_WINUNICODE)
    # XTYP_ADVSTART begins an advise loop for the item; XTYP_ADVSTOP ends it.
    hDdeData = DDE.ClientTransaction(LPBYTE(), 0, self._hConv, hszItem, CF_TEXT, XTYP_ADVSTOP if stop else XTYP_ADVSTART, TIMEOUT_ASYNC, LPDWORD())
    # The string handle is no longer needed once the transaction was issued.
    DDE.FreeStringHandle(self._idInst, hszItem)
    if not hDdeData:
        raise DDEError("Unable to %s advise" % ("stop" if stop else "start"), self._idInst)
    DDE.FreeDataHandle(hDdeData)
def create(recipients, data, cipher, flags=0):
    """Creates and encrypts a CMS message.

    @param recipients - list of X509 objects
    @param data - contents of the message
    @param cipher - CipherType object
    @param flags - flag
    """
    recipient_stack = StackOfX509(recipients)
    data_bio = Membio(data)
    cms_ptr = libcrypto.CMS_encrypt(recipient_stack.ptr, data_bio.bio, cipher.cipher, flags)
    # A NULL pointer from the C layer signals encryption failure.
    if cms_ptr is None:
        raise CMSError("encrypt EnvelopedData")
    return EnvelopedData(cms_ptr)
def get_build_template(template_name, params=None, to_file=None):
    '''Return a build template (string or file) used to build a version of a
    Singularity image on a cloud resource.

    :param template_name: the name of the template to retrieve in build/scripts
    :param params: (if needed) a dictionary of parameters to substitute in the file
    :param to_file: if defined, will write to file. Default returns string.
    '''
    base = get_installdir()
    template_folder = "%s/build/scripts" % (base)
    template_file = "%s/%s" % (template_folder, template_name)
    if not os.path.exists(template_file):
        bot.warning("Template %s not found." % template_file)
        return None
    bot.debug("Found template %s" % template_file)
    # Implement when needed - substitute params here
    # Will need to read in file instead of copying below
    # if params != None:
    if to_file is not None:
        shutil.copyfile(template_file, to_file)
        bot.debug("Template file saved to %s" % to_file)
        return to_file
    # The caller wants the template contents as a string.
    return ''.join(read_file(template_file))
def credits(self):
    """Return the credit range as a tuple, or a single integer when the
    range collapses to one value.

    Use self.cred to always get the tuple.
    """
    low, high = self.cred[0], self.cred[1]
    return low if low == high else self.cred
def get_stack_var(name, depth=0):
    '''This function may fiddle with the locals of the calling function,
    to make it the root function of the fiber. If called from a short-lived
    function be sure to use a bigger frame depth.

    Returns the fiber state or None.
    '''
    base_frame = _get_base_frame(depth)
    if not base_frame:  # Frame not found
        raise RuntimeError("Base frame not found")
    # Walk up the frame stack, starting at the base frame, looking for `name`
    # in each frame's locals.
    level = 0
    frame = base_frame
    while frame:
        locals = frame.f_locals
        value = locals.get(name)
        if value is not None:
            if level > 0:
                # Found above the base frame: cache a reference in the base
                # frame so later lookups stop there.
                # NOTE(review): writing to f_locals is CPython-specific and may
                # not persist on all interpreters/versions — confirm.
                base_frame.f_locals[name] = value
            return value
        # A frame tagged as a section boundary ends the search.
        if locals.get(SECTION_BOUNDARY_TAG):
            return None
        frame = frame.f_back
        level += 1
    return None
def iter_chunks(self):
    """Yield each readable chunk present in the region.

    Chunks that cannot be read for whatever reason are silently skipped.

    Warning: this function returns a :class:`nbt.nbt.NBTFile` object; use
    ``Chunk(nbtfile)`` to get a :class:`nbt.chunk.Chunk` instance.
    """
    for meta in self.get_metadata():
        try:
            yield self.get_chunk(meta.x, meta.z)
        except RegionFileFormatError:
            # Unreadable chunk: skip silently.
            continue
def _convert_hdxobjects(self, hdxobjects):
    # type: (List[HDXObjectUpperBound]) -> List[HDXObjectUpperBound]
    """Helper function to convert a supplied list of HDX objects to a list of dicts.

    Args:
        hdxobjects (List[T <= HDXObject]): List of HDX objects to convert

    Returns:
        List[Dict]: List of HDX objects converted to simple dictionaries
    """
    # Manual append loop replaced with the equivalent list comprehension.
    return [hdxobject.data for hdxobject in hdxobjects]
def _spot_check_that_elements_produced_by_this_generator_have_attribute(self, name):
    """Spot-check that items produced by this generator carry the attribute `name`."""
    # Spawn a throwaway clone so the check doesn't consume this generator.
    spawned = self.values_gen.spawn()
    sample = next(spawned)[0]
    if not hasattr(sample, name):
        raise AttributeError(f"Items produced by {self} do not have the attribute '{name}'")
def extract_firmware(self):
    '''Extract camera firmware (tag is called 'software' in EXIF)'''
    software, _ = self._extract_alternative_fields(
        ['Image Software'], default="", field_type=str)
    return software
def workspaces(self, index=None):
    """Return a generator over all workspace instances, or a single one.

    With ``index`` given, the workspace at that index is returned instead
    of a generator.
    """
    central = self.centralWidget()
    if index is not None:
        return central.widget(index)
    return (central.widget(i) for i in range(central.count()))
def calculate_trans(thickness_cm: float, miu_per_cm: np.ndarray):
    """Calculate the transmission signal using the formula

    transmission = exp(-thickness_cm * miu_per_cm)

    Parameters:
        thickness_cm: float, sample thickness in cm
        miu_per_cm: np.ndarray of linear attenuation coefficients (1/cm)

    Returns:
        np.ndarray: transmission array
    """
    # NOTE: the ``np.float`` annotation used previously was removed in
    # NumPy 1.24 and would raise AttributeError at import time; plain
    # ``float`` (and ``np.ndarray`` instead of ``np.array``) is correct.
    transmission = np.exp(-thickness_cm * miu_per_cm)
    return np.array(transmission)
def __remove_obsolete_metadata(self):
    """Removes obsolete entries from the metadata of all stored routines."""
    # Keep only metadata entries that still have a corresponding source file,
    # preserving the iteration order of the source file names.
    self._pystratum_metadata = {
        name: self._pystratum_metadata[name]
        for name in self._source_file_names
        if name in self._pystratum_metadata
    }
def deserialize(cls, target_class, pagination_response):
    """:type target_class: client.Pagination | type
    :type pagination_response: dict
    :rtype: client.Pagination
    """
    pagination = client.Pagination()
    parsed = cls.parse_pagination_dict(pagination_response)
    # populate the fresh Pagination instance from the parsed dict
    vars(pagination).update(parsed)
    return pagination
def query_builder(self, paths_rows=None, lat=None, lon=None, address=None,
                  start_date=None, end_date=None, cloud_min=None, cloud_max=None):
    """Builds the proper search syntax (query) for Landsat API.

    :param paths_rows:
        A string in this format: "003,003,004,004". Must be in pairs and
        separated by comma.
    :type paths_rows: String
    :param lat: The latitude
    :type lat: String, float, integer
    :param lon: The longitude
    :type lon: String, float, integer
    :param address: The address (takes precedence over lat/lon)
    :type address: String
    :param start_date: Date string. format: YYYY-MM-DD
    :type start_date: String
    :param end_date: Date string. format: YYYY-MM-DD
    :type end_date: String
    :param cloud_min: float specifying the minimum percentage. e.g. 4.3
    :type cloud_min: float
    :param cloud_max: float specifying the maximum percentage. e.g. 78.9
    :type cloud_max: float
    :returns: String
    """
    query = []
    or_string = ''
    and_string = ''
    search_string = ''
    if paths_rows:
        # Converting rows and paths to a paired list, then OR-joining
        # each (path, row) pair expression.
        new_array = create_paired_list(paths_rows)
        paths_rows = ['(%s)' % self.row_path_builder(i[0], i[1]) for i in new_array]
        or_string = '+OR+'.join(map(str, paths_rows))
    # Open-ended date ranges are clamped to 2009-01-01 / 2100-01-01.
    if start_date and end_date:
        query.append(self.date_range_builder(start_date, end_date))
    elif start_date:
        query.append(self.date_range_builder(start_date, '2100-01-01'))
    elif end_date:
        query.append(self.date_range_builder('2009-01-01', end_date))
    # NOTE(review): a cloud bound of 0 (or 0.0) is treated as "not given"
    # because of truthiness — confirm callers never pass cloud_min=0.
    if cloud_min and cloud_max:
        query.append(self.cloud_cover_prct_range_builder(cloud_min, cloud_max))
    elif cloud_min:
        query.append(self.cloud_cover_prct_range_builder(cloud_min, '100'))
    elif cloud_max:
        query.append(self.cloud_cover_prct_range_builder('-1', cloud_max))
    # Address takes precedence; otherwise both lat AND lon are required.
    if address:
        query.append(self.address_builder(address))
    elif (lat is not None) and (lon is not None):
        query.append(self.lat_lon_builder(lat, lon))
    if query:
        and_string = '+AND+'.join(map(str, query))
    if and_string and or_string:
        search_string = and_string + '+AND+(' + or_string + ')'
    else:
        # At most one of the two parts is non-empty here, so plain
        # concatenation yields that part (or the empty string).
        search_string = or_string + and_string
    return search_string
def accel_quit(self, *args):
    """Callback to prompt the user whether to quit Guake or not.

    Quits immediately when no prompt is configured; otherwise shows a
    confirmation dialog listing running foreground processes, tabs and
    notebooks, and quits only if the user confirms.
    """
    procs = self.notebook_manager.get_running_fg_processes_count()
    tabs = self.notebook_manager.get_n_pages()
    notebooks = self.notebook_manager.get_n_notebooks()
    prompt_cfg = self.settings.general.get_boolean('prompt-on-quit')
    prompt_tab_cfg = self.settings.general.get_int('prompt-on-close-tab')
    # "Prompt on tab close" config overrides "prompt on quit" config:
    # 1 = prompt only when fg processes are running, 2 = always prompt.
    if prompt_cfg or (prompt_tab_cfg == 1 and procs > 0) or (prompt_tab_cfg == 2):
        log.debug("Remaining procs=%r", procs)
        if PromptQuitDialog(self.window, procs, tabs, notebooks).quit():
            log.info("Quitting Guake")
            Gtk.main_quit()
    else:
        # no prompt configured: quit right away
        log.info("Quitting Guake")
        Gtk.main_quit()
def copy(self):
    """Return a shallow copy of this graph."""
    clone = DirectedGraph()
    # copy the vertex set and both adjacency maps; the contained vertex
    # objects themselves are shared (shallow copy)
    clone._vertices = set(self._vertices)
    clone._forwards = {vertex: set(targets)
                       for vertex, targets in self._forwards.items()}
    clone._backwards = {vertex: set(sources)
                        for vertex, sources in self._backwards.items()}
    return clone
def get_actions(actions):
    """Resolve each action name into an action object.

    Names that do not resolve to a truthy object are dropped. ``None`` or
    an empty input yields an empty list.
    """
    if not actions:
        return []
    candidates = (get_action(action) for action in actions)
    return [obj for obj in candidates if obj]
def icp(a, b, initial=np.identity(4), threshold=1e-5, max_iterations=20, **kwargs):
    """Apply the iterative closest point algorithm to align a point cloud with
    another point cloud or mesh. Will only produce reasonable results if the
    initial transformation is roughly correct. Initial transformation can be
    found by applying Procrustes' analysis to a suitable set of landmark
    points (often picked manually).

    Parameters
    ----------
    a : (n, 3) float
      List of points in space.
    b : (m, 3) float or Trimesh
      List of points in space or mesh.
    initial : (4, 4) float
      Initial transformation.
    threshold : float
      Stop when change in cost is less than threshold.
    max_iterations : int
      Maximum number of iterations.
    kwargs : dict
      Args to pass to procrustes.

    Returns
    ----------
    matrix : (4, 4) float
      The transformation matrix sending a to b.
    transformed : (n, 3) float
      The image of a under the transformation.
    cost : float
      The cost of the transformation.
    """
    a = np.asanyarray(a, dtype=np.float64)
    if not util.is_shape(a, (-1, 3)):
        raise ValueError('points must be (n,3)!')
    is_mesh = util.is_instance_named(b, 'Trimesh')
    if not is_mesh:
        b = np.asanyarray(b, dtype=np.float64)
        if not util.is_shape(b, (-1, 3)):
            raise ValueError('points must be (n,3)!')
        # KD-tree over b for fast nearest-neighbour queries in the loop
        btree = cKDTree(b)
    # transform a under the initial transformation
    a = transform_points(a, initial)
    total_matrix = initial
    # start with infinite cost
    old_cost = np.inf
    # avoid looping forever by capping iterations
    for n_iteration in range(max_iterations):
        # find the closest point in b to each point in a
        if is_mesh:
            closest, distance, faces = b.nearest.on_surface(a)
        else:
            distances, ix = btree.query(a, 1)
            closest = b[ix]
        # align a with the closest points via Procrustes analysis
        matrix, transformed, cost = procrustes(a=a, b=closest, **kwargs)
        # update a with our new transformed points
        a = transformed
        # accumulate this iteration's transform into the running total
        total_matrix = np.dot(matrix, total_matrix)
        # converged: improvement in cost fell below the threshold
        if old_cost - cost < threshold:
            break
        else:
            old_cost = cost
    return total_matrix, transformed, cost
def to_vcf(self, path, rename=None, number=None, description=None, fill=None,
           write_header=True):
    r"""Write to a variant call format (VCF) file.

    Delegates to :func:`write_vcf`, passing this table as the callset.

    Parameters
    ----------
    path : string
        File path.
    rename : dict, optional
        Rename these columns in the VCF.
    number : dict, optional
        Override the number specified in INFO headers.
    description : dict, optional
        Descriptions for the INFO and FILTER headers.
    fill : dict, optional
        Fill values used for missing data in the table.
    write_header : bool, optional
        If True write VCF header.

    Examples
    --------
    Given a variant table ``vt`` with CHROM/POS/ID/REF/ALT/qual columns plus
    boolean filter columns (``filter_*``) and INFO columns::

        >>> rename = {'dp': 'DP', 'qd': 'QD', 'filter_qd': 'QD'}
        >>> fill = {'ALT': b'.', 'ac': -1}
        >>> number = {'ac': 'A'}
        >>> description = {'ac': 'Allele counts', 'filter_dp': 'Low depth'}
        >>> vt.to_vcf('example.vcf', rename=rename, fill=fill,
        ...           number=number, description=description)

    The output file contains ``##INFO`` and ``##FILTER`` headers derived
    from the table's columns, followed by one data line per variant.
    """
    write_vcf(path, callset=self, rename=rename, number=number,
              description=description, fill=fill, write_header=write_header)
def load_json(file):
    """Load and parse a JSON file located next to this module.

    :param file: file name, relative to this module's directory
    :return: the parsed JSON content
    """
    here = os.path.dirname(os.path.abspath(__file__))
    # explicit encoding: JSON is UTF-8 by spec, and relying on the locale
    # default encoding breaks on non-UTF-8 systems
    with open(os.path.join(here, file), encoding="utf-8") as jfile:
        data = json.load(jfile)
    return data
def render_tile(cells, ti, tj, render, params, metadata, layout, summary):
    """Render each cell in the tile and stitch it into a single image.

    :param cells: dict mapping (i, j) grid keys to cell data
    :param ti: tile index (not used in the body; part of the caller's interface)
    :param tj: tile index (not used in the body; part of the caller's interface)
    :param render: callable producing an image for one cell
    :param params: settings dict; uses "cell_size", "n_tile" and optionally
        "scale_density" / "scale_type"
    :param metadata: passed through to `render`
    :param layout: passed through to `render`
    :param summary: passed through to `render`; summary["max_density"] is
        used when density scaling is enabled
    :return: a PIL RGB image of side cell_size * n_tile
    """
    image_size = params["cell_size"] * params["n_tile"]
    # start from a white canvas
    tile = Image.new("RGB", (image_size, image_size), (255, 255, 255))
    keys = cells.keys()
    for i, key in enumerate(keys):
        print("cell", i + 1, "/", len(keys), end='\r')
        cell_image = render(cells[key], params, metadata, layout, summary)
        # stitch this rendering into the tile image: cell position within
        # the tile is the grid key modulo the tile width
        ci = key[0] % params["n_tile"]
        cj = key[1] % params["n_tile"]
        xmin = ci * params["cell_size"]
        ymin = cj * params["cell_size"]
        xmax = (ci + 1) * params["cell_size"]
        ymax = (cj + 1) * params["cell_size"]
        if params.get("scale_density", False):
            # shrink the cell image proportionally to log-density
            density = len(cells[key]["gi"])
            # scale = density / summary["max_density"]
            scale = math.log(density) / (math.log(summary["max_density"]) or 1)
            owidth = xmax - xmin
            width = int(round(owidth * scale))
            if (width < 1):
                width = 1
            # center the shrunk image; offsetR absorbs odd remainders
            offsetL = int(round((owidth - width) / 2))
            offsetR = owidth - width - offsetL
            # NOTE(review): the box uses offsetL for both x and y minima —
            # looks intentional for square cells, but confirm.
            box = [xmin + offsetL, ymin + offsetL, xmax - offsetR, ymax - offsetR]
            resample = params.get("scale_type", Image.NEAREST)
            cell_image = cell_image.resize(size=(width, width), resample=resample)
        else:
            box = [xmin, ymin, xmax, ymax]
        tile.paste(cell_image, box)
    print("\n")
    return tile
def get_graph_metadata(self, graph):
    """Get the model metadata from a given onnx graph.

    Initializers (weights) are excluded from the reported inputs; only true
    graph inputs and all outputs are returned with their static shapes.
    """
    initializer_names = {tensor.name for tensor in graph.initializer}

    def _static_shape(value_info):
        # read the dim_value of every dimension in the tensor type
        return tuple(d.dim_value for d in value_info.type.tensor_type.shape.dim)

    input_data = [(value.name, _static_shape(value))
                  for value in graph.input
                  if value.name not in initializer_names]
    output_data = [(value.name, _static_shape(value)) for value in graph.output]
    return {'input_tensor_data': input_data, 'output_tensor_data': output_data}
def outputs(ctx, client, revision, paths):
    r"""Show output files in the repository.

    <PATHS>    Files to show. If no files are given all output files are shown.

    Exits with status 1 when explicit paths were requested but no outputs
    exist, or when a reported output is not covered by the requested paths.
    """
    graph = Graph(client)
    # build the dependency graph restricted to the given paths and revision
    filter = graph.build(paths=paths, revision=revision)
    output_paths = graph.output_paths
    click.echo('\n'.join(graph._format_path(path) for path in output_paths))
    if paths:
        if not output_paths:
            ctx.exit(1)
        from renku.models._datastructures import DirectoryTree
        tree = DirectoryTree.from_list(item.path for item in filter)
        # every output must be present in the tree of filtered paths
        for output in output_paths:
            if tree.get(output) is None:
                ctx.exit(1)
        return
def collection(self, attribute):
    """Returns the collection corresponding the attribute name."""
    collections = {
        "dependencies": self.dependencies,
        "publics": self.publics,
        "members": self.members,
        "types": self.types,
        "executables": self.executables,
        "interfaces": self.interfaces,
    }
    return collections[attribute]
def remove_type_from_resource(type_id, resource_type, resource_id, **kwargs):
    """Remove a resource type from a resource.

    :param type_id: id of the type to remove
    :param resource_type: one of 'NODE', 'LINK' or 'GROUP'
    :param resource_id: id of the node, link or group
    :returns: the string 'OK' on success

    The ``.one()`` query raises if no matching ResourceType row exists.
    """
    # exactly one of the three ids is populated, selected by resource_type
    node_id = resource_id if resource_type == 'NODE' else None
    link_id = resource_id if resource_type == 'LINK' else None
    group_id = resource_id if resource_type == 'GROUP' else None
    resourcetype = db.DBSession.query(ResourceType).filter(
        ResourceType.type_id == type_id,
        ResourceType.ref_key == resource_type,
        ResourceType.node_id == node_id,
        ResourceType.link_id == link_id,
        ResourceType.group_id == group_id).one()
    db.DBSession.delete(resourcetype)
    # flush (not commit) — the caller's transaction controls the commit
    db.DBSession.flush()
    return 'OK'
def derivative(self, point):
    """Derivative of this operator.

    ``PowerOperator(p).derivative(y)(x) == p * y ** (p - 1) * x``

    Parameters
    ----------
    point : `domain` element
        The point in which to take the derivative

    Returns
    -------
    derivative : `Operator`
        The derivative in ``point``

    Examples
    --------
    Use on vector spaces:

    >>> op = PowerOperator(odl.rn(3), exponent=2)
    >>> dop = op.derivative(op.domain.element([1, 2, 3]))
    >>> dop([1, 1, 1])
    rn(3).element([ 2.,  4.,  6.])

    Use with scalars:

    >>> op = PowerOperator(odl.RealNumbers(), exponent=2)
    >>> dop = op.derivative(2.0)
    >>> dop(2.0)
    8.0
    """
    # Chain rule: the derivative of x**p at `point` acts as pointwise
    # multiplication by p * point**(p - 1).
    return self.exponent * MultiplyOperator(point ** (self.exponent - 1),
                                            domain=self.domain, range=self.range)
def snapshots(self):
    """Get all Volumes of type Snapshot. Updates every time - no caching.

    :return: a `list` of all the `ScaleIO_Volume` that are of type Snapshot.
    :rtype: list
    """
    # re-validate the session before hitting the API
    self.connection._check_login()
    # fetch every volume instance, then keep only the snapshots
    response = self.connection._do_get("{}/{}".format(
        self.connection._api_url, "types/Volume/instances")).json()
    all_volumes_snapshot = []
    for volume in response:
        if volume['volumeType'] == 'Snapshot':
            all_volumes_snapshot.append(Volume.from_dict(volume))
    return all_volumes_snapshot
def download(feature_type, output_base_path, extent, progress_dialog=None,
             server_url=None):
    """Download shapefiles from Kartoza server.

    .. versionadded:: 3.2

    :param feature_type: What kind of features should be downloaded.
        Currently 'buildings', 'building-points' or 'roads' are supported.
    :type feature_type: str

    :param output_base_path: The base path of the shape file.
    :type output_base_path: str

    :param extent: A list in the form [xmin, ymin, xmax, ymax] where all
        coordinates provided are in Geographic / EPSG:4326.
    :type extent: list

    :param progress_dialog: A progress dialog.
    :type progress_dialog: QProgressDialog

    :param server_url: The server URL to use.
    :type server_url: basestring

    :raises: ImportDialogError, CanceledImportDialogError
    """
    if not server_url:
        server_url = PRODUCTION_SERVER
    # preparing necessary data: bbox order is lon_min,lat_min,lon_max,lat_max
    min_longitude = extent[0]
    min_latitude = extent[1]
    max_longitude = extent[2]
    max_latitude = extent[3]
    box = (
        '{min_longitude},{min_latitude},{max_longitude},'
        '{max_latitude}').format(
            min_longitude=min_longitude,
            min_latitude=min_latitude,
            max_longitude=max_longitude,
            max_latitude=max_latitude)
    # build the request URL; client version info is sent for server-side stats
    url = (
        '{url_osm_prefix}'
        '{feature_type}'
        '{url_osm_suffix}?'
        'bbox={box}&'
        'qgis_version={qgis}&'
        'lang={lang}&'
        'inasafe_version={inasafe_version}'.format(
            url_osm_prefix=server_url,
            feature_type=feature_type,
            url_osm_suffix=URL_OSM_SUFFIX,
            box=box,
            qgis=qgis_version(),
            lang=locale(),
            inasafe_version=get_version()))
    path = tempfile.mktemp('.shp.zip')
    # download and extract it
    fetch_zip(url, path, feature_type, progress_dialog)
    extract_zip(path, output_base_path)
    if progress_dialog:
        progress_dialog.done(QDialog.Accepted)
def resolve(self):
    """Resolve shell variables ($VAR) and a ~userdir prefix in the pathname."""
    expanded_user = os.path.expanduser(self.fqpn)
    return os.path.expandvars(expanded_user)
def hgetall(self, key, *, encoding=_NOTSET):
    """Get all the fields and values in a hash.

    :param key: key of the hash
    :param encoding: optional encoding applied to the reply values
    :return: awaitable resolving to a dict of field -> value
    """
    fut = self.execute(b'HGETALL', key, encoding=encoding)
    # HGETALL replies with a flat field/value list; convert it to a dict
    return wait_make_dict(fut)
def scan(self, search_path=None):
    """Scan `search_path` for distributions usable in this environment.

    Any distributions found are added to the environment.
    `search_path` should be a sequence of ``sys.path`` items.  If not
    supplied, ``sys.path`` is used.  Only distributions conforming to
    the platform/python version defined at initialization are added.
    """
    entries = sys.path if search_path is None else search_path
    for entry in entries:
        for dist in find_distributions(entry):
            self.add(dist)
def initialize(self, conf, ctx):
    """Initialization steps:

    1. Prepare elasticsearch connection, including details for indexing.
    """
    config = get_config()['ElasticsearchIndexBolt']
    # resolve and instantiate the configured Elasticsearch client class
    elasticsearch_class = import_name(config['elasticsearch_class'])
    self.es = elasticsearch_class(**config['elasticsearch_init'])
    # target index and document type used when indexing incoming tuples
    self.index = config['index']
    self.doc_type = config['doc_type']
def generateAcceptHeader(*elements):
    """Generate an HTTP Accept header value.

    [str or (str, float)] -> str

    Each element is either a media-type string (implicit quality of 1.0)
    or a (media_type, q) pair with 0 < q <= 1.  Entries are sorted by
    quality string then media type; q=1.0 entries omit the q parameter.

    :raises ValueError: if a preference factor is outside (0, 1].
    """
    parts = []
    for element in elements:
        # isinstance instead of `type(element) is str` so str subclasses
        # are treated as plain media types too
        if isinstance(element, str):
            qs = "1.0"
            mtype = element
        else:
            mtype, q = element
            q = float(q)
            if q > 1 or q <= 0:
                raise ValueError('Invalid preference factor: %r' % q)
            # one decimal place, matching the original wire format
            qs = '%0.1f' % (q,)
        parts.append((qs, mtype))
    parts.sort()
    chunks = []
    for q, mtype in parts:
        if q == '1.0':
            chunks.append(mtype)
        else:
            chunks.append('%s; q=%s' % (mtype, q))
    return ', '.join(chunks)
def delete_floatingip(self, floatingip_id):
    '''Deletes the specified floatingip'''
    # a falsy return from the client is normalized to True
    return self.network_conn.delete_floatingip(floatingip_id) or True
def relabel_squeeze(data):
    """Relabel `data` so its values form a dense 0..K-1 label range.

    Unused label values are squeezed out; relative ordering of labels
    is preserved (smallest original label becomes 0, and so on).
    """
    # np.unique's inverse index IS the dense relabeling: position of each
    # element's value within the sorted unique palette
    _, inverse = np.unique(data, return_inverse=True)
    return inverse.reshape(data.shape)
from collections import Counter
from itertools import chain
def element_frequency(lists):
    """Calculate the frequency of each element in a list of lists using collections module.

    Examples:
    >>> element_frequency([[1, 2, 3, 2], [4, 5, 6, 2], [7, 1, 9, 5]])
    {2: 3, 1: 2, 5: 2, 3: 1, 4: 1, 6: 1, 7: 1, 9: 1}
    >>> element_frequency([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
    {1: 1, 2: 1, 3: 1, 4: 1, 5: 1, 6: 1, 7: 1, 8: 1, 9: 1, 10: 1, 11: 1, 12: 1}

    :param lists: List of lists storing the numbers.
    :return: A Counter object that includes each number as a key and its frequency as a value.
    """
    # flatten with a generator expression and count in one pass
    return Counter(item for sublist in lists for item in sublist)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.