signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def is_true(value=None):
    """Return the boolean "truth" of *value*.

    The rules for what is a "True" value are:
      1. Integer/float values (or strings parseable as numbers) greater than 0.
      2. The string values "True" and "true" (any case).
      3. Any object for which ``bool(obj)`` returns True.

    :param value: object of any type to evaluate.
    :return: bool
    """
    # Coerce numeric-looking input first so "3" or "0.5" follow rule 1.
    try:
        value = int(value)
    except (ValueError, TypeError):
        pass
    try:
        value = float(value)
    except (ValueError, TypeError):
        pass
    # Now check for truthiness.
    if isinstance(value, (int, float)):
        # Note: bool is a subclass of int, so True/False land here too.
        return value > 0
    elif isinstance(value, str):
        return value.lower() == 'true'
    else:
        return bool(value)
|
def bna_config_cmd_input_dest ( self , ** kwargs ) :
"""Auto Generated Code"""
|
config = ET . Element ( "config" )
bna_config_cmd = ET . Element ( "bna_config_cmd" )
config = bna_config_cmd
input = ET . SubElement ( bna_config_cmd , "input" )
dest = ET . SubElement ( input , "dest" )
dest . text = kwargs . pop ( 'dest' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config )
|
def get_byte(self, i):
    """Consume two hex digits from iterator *i* and return them joined.

    :param i: character iterator exposing an ``index`` attribute.
    :return: two-character hex string.
    :raises SyntaxError: when a consumed character is not a hex digit.
    """
    digits = []
    for _ in range(2):
        ch = next(i)
        if ch.lower() not in _HEX:  # pragma: no cover
            raise SyntaxError('Invalid byte character at %d!' % (i.index - 1))
        digits.append(ch)
    return ''.join(digits)
|
def newpin(digits=4):
    """Return a random numeric string with the specified number of digits,
    default 4.

    >>> len(newpin())
    4
    >>> len(newpin(5))
    5
    >>> newpin().isdigit()
    True

    :param digits: desired PIN length.
    :return: zero-padded string of exactly *digits* decimal digits.
    """
    # randint's upper bound is inclusive, so 10**digits - 1 is the largest
    # value with at most `digits` digits -- no rejection/retry loop needed.
    return u'%0*d' % (digits, randint(0, 10 ** digits - 1))
|
def _calculate_weights(self, this_samples, N):
    """Calculate and save the importance weights of a run of *N* samples."""
    this_weights = self.weights.append(N)[:, 0]
    if self.target_values is None:
        # Weight = exp(log target - log proposal), per sample.
        for i in range(N):
            log_ratio = self.target(this_samples[i]) - \
                self.proposal.evaluate(this_samples[i])
            this_weights[i] = _exp(log_ratio)
    else:
        # Also record the target evaluations for later reuse.
        this_target_values = self.target_values.append(N)
        for i in range(N):
            this_target_values[i] = self.target(this_samples[i])
            log_ratio = this_target_values[i] - \
                self.proposal.evaluate(this_samples[i])
            this_weights[i] = _exp(log_ratio)
|
def condition_indices(df):
    '''Return a pandas Series with condition indices of the df columns.

    The condition index for each eigenvalue is
    ``sqrt(max(eigenvalues) / eigenvalue)``.

    Args:
        df: pandas DataFrame with columns to run diagnostics on.
    '''
    eigvals = eigenvalues(df)
    indices = np.sqrt(eigvals.max() / eigvals)
    return pd.Series(indices, df.columns, name='Condition index')
|
def add_cmd_handler(self, handler_obj):
    """Registers a new command handler object.

    All methods on `handler_obj` whose name starts with "cmd_" are
    registered as a GTP command. For example, the method cmd_genmove will
    be invoked when the engine receives a genmove command. Replacing an
    already-registered command prints a notice to stderr.

    Args:
        handler_obj: the handler object to register.
    """
    prefix = "cmd_"
    for attr_name in dir(handler_obj):
        if not attr_name.startswith(prefix):
            continue
        cmd = attr_name[len(prefix):]
        fn = getattr(handler_obj, attr_name)
        if cmd in self.cmds:
            print('Replacing {} with {}'.format(
                _handler_name(self.cmds[cmd]), _handler_name(fn)),
                file=sys.stderr)
        self.cmds[cmd] = fn
|
def get_job_input(self, job_id):
    """GetJobInput
    https://apidocs.joyent.com/manta/api.html#GetJobInput

    with the added sugar that it will retrieve the archived job if it has
    been archived, per:
    https://apidocs.joyent.com/manta/jobs-reference.html#job-completion-and-archival
    """
    try:
        return RawMantaClient.get_job_input(self, job_id)
    except errors.MantaAPIError as ex:
        # Anything other than "not found" is a genuine failure.
        if ex.res.status != 404:
            raise
        # 404: the job was archived; fetch the archived input keys instead.
        mpath = "/%s/jobs/%s/in.txt" % (self.account, job_id)
        content = self.get_object(mpath)
        return content.splitlines(False)
|
def export_glb(scene, extras=None, include_normals=False):
    """Export a scene as a binary GLTF (GLB) file.

    Parameters
    ----------
    scene : trimesh.Scene
        Input geometry
    extras : JSON serializable
        Will be stored in the extras field
    include_normals : bool
        Include vertex normals in output file?

    Returns
    -------
    exported : bytes
        Exported result in GLB 2.0
    """
    # If we were passed a bare Trimesh or Path3D object,
    # generate a Scene with just that mesh in it.
    if not util.is_instance_named(scene, "Scene") and hasattr(scene, "scene"):
        scene = scene.scene()

    tree, buffer_items = _create_gltf_structure(
        scene=scene, extras=extras, include_normals=include_normals)

    # Each bufferView describes a slice of the single binary buffer.
    views = []
    offset = 0
    for item in buffer_items:
        views.append({
            "buffer": 0,
            "byteOffset": offset,
            "byteLength": len(item)})
        offset += len(item)
    buffer_data = bytes().join(buffer_items)

    tree["buffers"] = [{"byteLength": len(buffer_data)}]
    tree["bufferViews"] = views

    # Export the tree to JSON for the content of the file; pad with spaces
    # so the start of the binary data is 4-byte aligned per the GLB spec
    # (the 20 accounts for the 12-byte file header + 8-byte chunk header).
    content = json.dumps(tree)
    content += (4 - ((len(content) + 20) % 4)) * " "
    content = content.encode("utf-8")
    # Make sure we didn't screw up the alignment.
    assert (len(content) % 4) == 0

    # The 12-byte GLB header: magic "glTF", version 2, and total file
    # length (header + all chunks), followed by the JSON chunk header
    # (length + chunk type 1313821514 == "JSON").
    header = _byte_pad(np.array([
        _magic["gltf"],
        2,
        len(content) + len(buffer_data) + 28,
        len(content),
        1313821514], dtype="<u4").tobytes())
    # Header of the binary data section (length + 0x004E4942 == "BIN").
    bin_header = _byte_pad(np.array(
        [len(buffer_data), 0x004E4942], dtype="<u4").tobytes())
    return bytes().join([header, content, bin_header, buffer_data])
|
def percentile(self, percentile):
    """Calculate a given spectral percentile for this `Spectrogram`.

    Parameters
    ----------
    percentile : `float`
        percentile (0 - 100) of the bins to compute

    Returns
    -------
    spectrum : `~gwpy.frequencyseries.FrequencySeries`
        the given percentile `FrequencySeries` calculated from this
        `SpectralVariance`
    """
    # BUGFIX: `scipy.percentile` was a deprecated alias of
    # `numpy.percentile` and has been removed from modern SciPy;
    # call numpy directly.
    import numpy
    out = numpy.percentile(self.value, percentile, axis=0)
    if self.name is not None:
        name = '{}: {} percentile'.format(self.name, _ordinal(percentile))
    else:
        name = None
    return FrequencySeries(
        out, epoch=self.epoch, channel=self.channel, name=name,
        f0=self.f0, df=self.df,
        frequencies=(hasattr(self, '_frequencies') and
                     self.frequencies or None))
|
def set_scene_name(self, scene_id, name):
    """Rename a scene by scene ID.

    :param scene_id: ID of the scene to rename.
    :param name: new name for the scene.
    :return: ``(success, sequence_number, message)`` tuple.
    """
    # Does that scene_id exist?
    if scene_id not in self.state.scenes:
        err_msg = "Requested to rename scene {sceneNum}, which does not exist".format(sceneNum=scene_id)
        logging.info(err_msg)
        return (False, 0, err_msg)
    # Scene records are immutable; store a renamed copy.
    self.state.scenes[scene_id] = \
        self.state.scenes[scene_id]._replace(name=name)
    # TODO: is there a better solution?
    sequence_number = self.zmq_publisher.publish_scene_name(scene_id, name)
    logging.debug("Renamed scene {sceneNum}".format(sceneNum=scene_id))
    return (True, sequence_number, "OK")
|
def import_laid_out_tensor(mesh, laid_out_tensor, shape, name=None):
    """Import a laid_out_tensor.

    For expert users: the input must already be laid out appropriately
    given the eventual MeshImpl and layout.

    Args:
      mesh: a Mesh
      laid_out_tensor: a LaidOutTensor
      shape: a mtf.Shape
      name: an optional string

    Returns:
      a mtf.Tensor
    """
    op = ImportLaidOutTensorOperation(
        mesh, laid_out_tensor, convert_to_shape(shape), name=name)
    return op.outputs[0]
|
def _parse_textgroup(self, cts_file):
    """Parse a textgroup from a CTS file.

    :param cts_file: Path to the CTS file
    :type cts_file: str
    :return: (CtsTextgroupMetadata, path of the parsed file)
    """
    with io.open(cts_file) as __xml__:
        parsed = self.classes["textgroup"].parse(resource=__xml__)
    return parsed, cts_file
|
def psql(self, *psqlargs):
    """Run a psql command and return its stdout.

    :param psqlargs: extra arguments appended to the psql invocation.
    """
    db, env = self.get_db_args_env()
    # Stop on errors, connect non-interactively (-w), unaligned (-A)
    # tuples-only (-t) output; caller-supplied args come last.
    base_args = [
        '-v', 'ON_ERROR_STOP=on',
        '-d', db['name'],
        '-h', db['host'],
        '-U', db['user'],
        '-w', '-A', '-t',
    ]
    stdout, stderr = External.run('psql', base_args + list(psqlargs),
                                  capturestd=True, env=env)
    if stderr:
        log.warn('stderr: %s', stderr)
    log.debug('stdout: %s', stdout)
    return stdout
|
def migrate_secret_key(old_key):
    """Call entry points exposed for the SECRET_KEY change.

    :param old_key: the previous secret key, passed to each entry point.
    :raises click.ClickException: if SECRET_KEY is missing or None.
    """
    if current_app.config.get('SECRET_KEY') is None:
        raise click.ClickException('SECRET_KEY is not set in the configuration.')
    for ep in iter_entry_points('invenio_base.secret_key'):
        try:
            ep.load()(old_key=old_key)
        except Exception:
            current_app.logger.error(
                'Failed to initialize entry point: {0}'.format(ep))
            raise
    click.secho('Successfully changed secret key.', fg='green')
|
def modify_identity(self, identity, **kwargs):
    """Modify some attributes of an identity or its name.

    :param identity: a zobjects.Identity with `id` set (mandatory); also
        set the items you want to modify/set and/or the `name` attribute
        to rename the identity. Can also take the name as a string,
        followed by attributes to modify.
    :returns: zobjects.Identity object
    """
    if isinstance(identity, zobjects.Identity):
        self.request('ModifyIdentity', {'identity': identity._full_data})
        return self.get_identities(identity=identity.name)[0]
    # Name given as a string: build the attribute list from kwargs.
    attrs = [{'name': attr, '_content': value}
             for attr, value in kwargs.items()]
    self.request('ModifyIdentity',
                 {'identity': {'name': identity, 'a': attrs}})
    return self.get_identities(identity=identity)[0]
|
def Jacobian_re_im(self, pars):
    r"""Return the Jacobian :math:`J` of the real/imaginary response.

    Stacks the partial derivatives of the real part (w.r.t. rho0, m, tau,
    c) followed by those of the imaginary part, concatenated along axis 1.

    >>> import sip_models.res.cc as cc
    >>> import numpy as np
    >>> f = np.logspace(-3, 3, 20)
    >>> pars = [100, 0.1, 0.04, 0.8]
    >>> obj = cc.cc(f)
    >>> J = obj.Jacobian_re_im(pars)
    """
    # BUGFIX: leftover debug output ("SHAPES" + per-partial shape prints)
    # removed. The log10-variant partials remain available if needed.
    partials = [
        # real part
        self.dre_drho0(pars)[:, np.newaxis],
        self.dre_dm(pars),
        self.dre_dtau(pars),
        self.dre_dc(pars),
        # imaginary part
        self.dim_drho0(pars)[:, np.newaxis],
        self.dim_dm(pars),
        self.dim_dtau(pars),
        self.dim_dc(pars),
    ]
    return np.concatenate(partials, axis=1)
|
def DEFINE_flag(flag, flag_values=_flagvalues.FLAGS, module_name=None):  # pylint: disable=invalid-name
    """Registers a 'Flag' object with a 'FlagValues' object.

    By default, the global FLAGS 'FlagValue' object is used. Typical users
    will use one of the more specialized DEFINE_xxx functions, such as
    DEFINE_string or DEFINE_integer. But developers who need to create
    Flag objects themselves should use this function to register their
    flags.

    Args:
      flag: Flag, a flag that is key to the module.
      flag_values: FlagValues, the FlagValues instance with which the flag
        will be registered. This should almost never need to be
        overridden.
      module_name: str, the name of the Python module declaring this flag.
        If not provided, it will be computed using the stack trace of this
        call.
    """
    # Copying the reference to flag_values prevents pychecker warnings.
    fv = flag_values
    fv[flag.name] = flag
    # Tell flag_values who's defining the flag.
    if module_name:
        module = sys.modules.get(module_name)
    else:
        module, module_name = _helpers.get_calling_module_object_and_name()
    flag_values.register_flag_by_module(module_name, flag)
    flag_values.register_flag_by_module_id(id(module), flag)
|
def main():
    """Command line entry point."""

    def help_exit():
        raise SystemExit("usage: ddate [day] [month] [year]")

    if "--help" in sys.argv or "-h" in sys.argv:
        help_exit()
    if len(sys.argv) == 2:
        # Allow a single "23-2-2014"-style argument; be lazy/sloppy about
        # which separator the human used...
        for split_char in ".-/`,:;":
            if split_char in sys.argv[1]:
                parts = sys.argv[1].split(split_char)
                del sys.argv[1]
                sys.argv.extend(parts)
                break
    date = _get_date(*sys.argv[1:])
    if date:
        print(DDate(date))
    else:
        help_exit()
|
def _map_content_types ( archetype_tool , catalogs_definition ) :
"""Updates the mapping for content _ types against catalogs
: archetype _ tool : an archetype _ tool object
: catalogs _ definition : a dictionary like
CATALOG _ ID : {
' types ' : [ ' ContentType ' , . . . ] ,
' indexes ' : {
' UID ' : ' FieldIndex ' ,
' columns ' : [
' Title ' ,"""
|
# This will be a dictionari like { ' content _ type ' : [ ' catalog _ id ' , . . . ] }
ct_map = { }
# This list will contain the atalog ids to be rebuild
to_reindex = [ ]
# getting the dictionary of mapped content _ types in the catalog
map_types = archetype_tool . catalog_map
for catalog_id in catalogs_definition . keys ( ) :
catalog_info = catalogs_definition . get ( catalog_id , { } )
# Mapping the catalog with the defined types
types = catalog_info . get ( 'types' , [ ] )
for t in types :
tmp_l = ct_map . get ( t , [ ] )
tmp_l . append ( catalog_id )
ct_map [ t ] = tmp_l
# Mapping
for t in ct_map . keys ( ) :
catalogs_list = ct_map [ t ]
# Getting the previus mapping
perv_catalogs_list = archetype_tool . catalog_map . get ( t , [ ] )
# If the mapping has changed , update it
set1 = set ( catalogs_list )
set2 = set ( perv_catalogs_list )
if set1 != set2 :
archetype_tool . setCatalogsByType ( t , catalogs_list )
# Adding to reindex only the catalogs that have changed
to_reindex = to_reindex + list ( set1 - set2 ) + list ( set2 - set1 )
return to_reindex
|
def _filter_seqs(fn):
    """Convert names of sequences to unique ids.

    Reads a FASTA/FASTQ file and writes "<name>_unique.fa", keeping only
    short (< 26 nt) sequences whose count is 0 or > 1, with uniquified
    names. Skipped if the output file already exists.

    :param fn: path to the input FASTA/FASTQ file.
    :return: path to the filtered output file.
    """
    out_file = op.splitext(fn)[0] + "_unique.fa"
    idx = 0
    if not file_exists(out_file):
        with open(out_file, 'w') as out_handle:
            with open(fn) as in_handle:
                for line in in_handle:
                    if line.startswith("@") or line.startswith(">"):
                        fixed_name = _make_unique(line.strip(), idx)
                        # BUGFIX: file.next() is Python-2-only; use the
                        # builtin next() so this works on Python 3.
                        seq = next(in_handle).strip()
                        counts = _get_freq(fixed_name)
                        if len(seq) < 26 and (counts > 1 or counts == 0):
                            idx += 1
                            print(fixed_name, file=out_handle, end="\n")
                            print(seq, file=out_handle, end="\n")
                        if line.startswith("@"):
                            # FASTQ record: skip the "+" separator line
                            # and the quality line.
                            next(in_handle)
                            next(in_handle)
    return out_file
|
def modify(self, sort=None, purge=False, done=None, undone=None):
    """Handles the 'm' command.

    :param sort: sort pattern.
    :param purge: whether to purge items marked as 'done'.
    :param done: done pattern.
    :param undone: not-done pattern.
    """
    sort_pattern = self._getPattern(sort)
    done_pattern = self._getDone(done, undone)
    self.model.modifyInPlace(sort=sort_pattern, purge=purge,
                             done=done_pattern)
|
def isPositiveMap(self):
    """Return True if increasing RA increases pixel x in skyToPix()."""
    x_ref, _ = self.skyToPix(self.ra0_deg, self.dec0_deg)
    # Nudge RA by one arcsecond and see which way pixel x moves.
    x_nudged, _ = self.skyToPix(self.ra0_deg + 1 / 3600., self.dec0_deg)
    return x_nudged > x_ref
|
def new_topic(self, title, content):
    """Create a new group forum topic.

    :param title: topic title.
    :param content: topic body.
    :return: the new thread id, or ``None`` on failure.
    """
    data = {
        'title': title,
        'body': content,
        'csrfmiddlewaretoken': self._request.cookies.get('csrftoken'),
    }
    url = 'http://www.shanbay.com/api/v1/forum/%s/thread/' % self.forum_id()
    response = self.request(url, 'post', data=data)
    payload = response.json()
    if payload['status_code'] == 0:
        return payload['data']['thread']['id']
|
def clearkml(self):
    '''Clear the KMLs from the map.

    Removes every currently shown (text) layer from the map widget,
    resets all layer bookkeeping lists and flags the menu for refresh.
    '''
    # Go through all the current layers and remove them.
    for layer in self.curlayers:
        self.mpstate.map.remove_object(layer)
    for layer in self.curtextlayers:
        self.mpstate.map.remove_object(layer)
    # Reset all bookkeeping.
    self.allayers = []
    self.curlayers = []
    self.alltextlayers = []
    self.curtextlayers = []
    self.menu_needs_refreshing = True
|
def url_tibiadata(self):
    """:class:`str`: The URL to the highscores page on TibiaData.com
    containing the results."""
    world, category, vocation = self.world, self.category, self.vocation
    return self.get_url_tibiadata(world, category, vocation)
|
def get_prior_alignment(self):
    """Return the prior alignment that was used for 2D basecalling.

    :return: Alignment data table.
    """
    alignment_group = '{}/HairpinAlign'.format(self.group_name)
    return self.handle.get_analysis_dataset(alignment_group, 'Alignment')
|
def check_file_exists(self, remote_cmd=""):
    """Check if ``dest_file`` already exists on the file system.

    :param remote_cmd: optional override for the remote listing command
        (only used for the "put" direction).
    :return: bool; ``None`` when direction is neither "put" nor "get".
    :raises ValueError: when the remote output cannot be interpreted.
    """
    if self.direction == "put":
        if not remote_cmd:
            remote_cmd = "dir {}{}".format(self.file_system, self.dest_file)
        remote_out = self.ssh_ctl_chan.send_command_expect(remote_cmd)
        search_string = r"{}.*Usage for".format(self.dest_file)
        if "No such file or directory" in remote_out:
            return False
        if re.search(search_string, remote_out, flags=re.DOTALL):
            return True
        raise ValueError("Unexpected output from check_file_exists")
    if self.direction == "get":
        return os.path.exists(self.dest_file)
|
def h_function(self, x, xsquare):
    """Compute the h-function as defined in Paillier's paper page 12,
    'Decryption using Chinese-remaindering'.

    :param x: prime modulus (p or q).
    :param xsquare: its square.
    """
    g_pow = powmod(self.public_key.g, x - 1, xsquare)
    return invert(self.l_function(g_pow, x), x)
|
def _todict ( cls ) :
"""generate a dict keyed by value"""
|
return dict ( ( getattr ( cls , attr ) , attr ) for attr in dir ( cls ) if not attr . startswith ( '_' ) )
|
def idle_task(self):
    '''Called rapidly by mavproxy.

    Reaps finished download threads; once the last one is gone, parses
    the downloaded manifests.
    '''
    # Non-blocking acquire: skip this tick if another thread holds the
    # lock rather than stalling the main loop.
    if self.downloaders_lock.acquire(False):
        try:
            removed_one = False
            # BUGFIX: iterate over a snapshot of the keys; deleting from
            # a dict while iterating its keys raises RuntimeError on
            # Python 3.
            for url in list(self.downloaders.keys()):
                if not self.downloaders[url].is_alive():
                    print("fw: Download thread for (%s) done" % url)
                    del self.downloaders[url]
                    removed_one = True
            if removed_one and not self.downloaders:
                # All downloads finished - parse them.
                self.manifests_parse()
        finally:
            # Always release, even if parsing raised.
            self.downloaders_lock.release()
|
def get_number(self):
    '''Parse a number token and return it as ``int`` or ``float``.

    .. versionchanged:: 0.9.2
        Add support for float exponent strings (e.g., ``3.435e-7``).
        Fixes `issue #4
        <https://github.com/wheeler-microfluidics/svg-model/issues/4>`.
    '''
    first = self.get_char('0123456789.-')
    if not first:
        return None
    token = first
    rest = self.get_chars('-e0123456789.')
    if rest:
        token += rest
    # A '.' or an exponent marker means the literal is a float.
    return float(token) if any(c in token for c in '.e') else int(token)
|
def total_length(self):
    """Return the total length of the captions, in whole seconds.

    Computed as the span from the first caption's start to the last
    caption's end; 0 when there are no captions.
    """
    if not self._captions:
        return 0
    first, last = self._captions[0], self._captions[-1]
    return int(last.end_in_seconds) - int(first.start_in_seconds)
|
def worker(f):
    """Generic wrapper to log uncaught exceptions in a function.

    When we cross concurrent.futures executor boundaries we lose our
    traceback information, and when doing bulk operations we may tolerate
    transient failures on a partial subset. However we still want to have
    full accounting of the error in the logs, in a format that our error
    collection (cwl subscription) can still pick up.
    """
    @functools.wraps(f)
    def _wrapped(*args, **kw):
        try:
            return f(*args, **kw)
        except Exception:
            worker_log.exception(
                'Error invoking %s', "%s.%s" % (f.__module__, f.__name__))
            raise
    return _wrapped
|
def sg_max(tensor, opt):
    r"""Computes the maximum of elements across axis of a tensor.

    See `tf.reduce_max()` in tensorflow.

    Args:
      tensor: A `Tensor` (automatically given by chain).
      opt:
        axis: A tuple/list of integers or an integer. The axis to reduce.
        keep_dims: If true, retains reduced dimensions with length 1.
        name: If provided, replace current tensor's name.

    Returns:
      A `Tensor`.
    """
    return tf.reduce_max(tensor, axis=opt.axis,
                         keep_dims=opt.keep_dims, name=opt.name)
|
def _process_stream_delta(self, delta_stream):
    """Bookkeeping on internal data structures while iterating a stream.

    Applies each policy change to ``self.policy_files`` and re-yields it.
    """
    for pchange in delta_stream:
        if pchange.kind == ChangeType.ADD:
            self.policy_files.setdefault(
                pchange.file_path, PolicyCollection()).add(pchange.policy)
        elif pchange.kind == ChangeType.REMOVE:
            self.policy_files[pchange.file_path].remove(pchange.policy)
        elif pchange.kind in (ChangeType.MOVED, ChangeType.MODIFIED):
            # A move first detaches the policy from its previous file.
            if pchange.policy.file_path != pchange.previous.file_path:
                self.policy_files[pchange.previous.file_path].remove(
                    pchange.previous)
            # NOTE(review): the membership test mixes
            # pchange.policy.file_path and pchange.file_path, preserved
            # verbatim from the original -- confirm intent upstream.
            if (pchange.policy.file_path in self.policy_files
                    and pchange.policy.name in
                    self.policy_files[pchange.file_path]):
                self.policy_files[pchange.file_path][pchange.policy.name] = \
                    pchange.policy
            else:
                self.policy_files.setdefault(
                    pchange.file_path,
                    PolicyCollection()).add(pchange.policy)
        else:
            self.policy_files[pchange.file_path][pchange.policy.name] = \
                pchange.policy
        yield pchange
|
def _open_icmp_socket(self, family):
    """Open a raw socket suitable for sending/receiving ICMP echo
    requests/responses.

    :param family: ``socket.AF_INET`` or ``socket.AF_INET6``.
    :raises MultiPingError: when raw-socket permission is denied.
    """
    if family == socket.AF_INET:
        proto = socket.IPPROTO_ICMP
    else:
        proto = _IPPROTO_ICMPV6
    try:
        return socket.socket(family, socket.SOCK_RAW, proto)
    except socket.error as e:
        # errno 1 == EPERM: raw sockets need root / CAP_NET_RAW.
        if e.errno == 1:
            raise MultiPingError("Root privileges required for sending "
                                 "ICMP")
        # Re-raise any other error.
        raise
|
def search(self, text, limit=1000, order_by=None, sort_order=None, filter=None):
    """Do a fulltext search for series in the Fred dataset. Returns
    information about matching series in a DataFrame.

    Parameters
    ----------
    text : str
        text to do fulltext search on, e.g., 'Real GDP'
    limit : int, optional
        limit the number of results to this value. If limit is 0, it
        means fetching all results without limit.
    order_by : str, optional
        order the results by a criterion. Valid options are
        'search_rank', 'series_id', 'title', 'units', 'frequency',
        'seasonal_adjustment', 'realtime_start', 'realtime_end',
        'last_updated', 'observation_start', 'observation_end',
        'popularity'
    sort_order : str, optional
        sort the results by ascending or descending order. Valid options
        are 'asc' or 'desc'
    filter : tuple, optional
        filters the results. Expects a tuple like
        (filter_variable, filter_value). Valid filter_variable values are
        'frequency', 'units', and 'seasonal_adjustment'

    Returns
    -------
    info : DataFrame
        a DataFrame containing information about the matching Fred series
    """
    url = "%s/series/search?search_text=%s&" % (self.root_url,
                                                quote_plus(text))
    return self.__get_search_results(url, limit, order_by, sort_order,
                                     filter)
|
def user_present(name, uid, password, channel=14, callback=False,
                 link_auth=True, ipmi_msg=True,
                 privilege_level='administrator', **kwargs):
    '''Ensure IPMI user and user privileges.

    name
        name of user (limit 16 bytes)
    uid
        user id number (1 to 7)
    password
        user password (limit 16 bytes)
    channel
        ipmi channel, defaults to 14 for auto
    callback
        User Restricted to Callback.
        False = User Privilege Limit is determined by the User Privilege
        Limit parameter privilege_level, for both callback and
        non-callback connections.
        True = User Privilege Limit is determined by the privilege_level
        parameter for callback connections, but is restricted to Callback
        level for non-callback connections. Thus, a user can only
        initiate a Callback when they 'call in' to the BMC, but once the
        callback connection has been made, the user could potentially
        establish a session as an Operator.
    link_auth
        User Link authentication (True/False): user name and password
        information will be used for link authentication (e.g. PPP CHAP)
        for the given channel. Link authentication itself is a global
        setting for the channel and is enabled/disabled via the
        serial/modem configuration parameters.
    ipmi_msg
        User IPMI Messaging (True/False): user name and password
        information will be used for IPMI Messaging, i.e. generic IPMI
        commands that are not associated with a particular payload type.
        For example, if IPMI Messaging is disabled for a user, but that
        user is enabled for activating the SOL payload type, then IPMI
        commands associated with SOL and session management are
        available, but generic IPMI commands such as Get SEL Time are
        unavailable.
    privilege_level
        one of: callback, user, operator, administrator, proprietary,
        no_access
    kwargs
        - api_host=localhost
        - api_user=admin
        - api_pass=
        - api_port=623
        - api_kg=None
    '''
    ret = {'name': name, 'result': False, 'comment': '', 'changes': {}}
    org_user = __salt__['ipmi.get_user'](uid=uid, channel=channel, **kwargs)
    # Detect any difference between desired and current access settings.
    change = False
    access = org_user['access']
    if access['callback'] != callback:
        change = True
    if access['link_auth'] != link_auth:
        change = True
    if access['ipmi_msg'] != ipmi_msg:
        change = True
    if access['privilege_level'] != privilege_level:
        change = True
    # Always probe the password too (test mode does not set it).
    if __salt__['ipmi.set_user_password'](uid, mode='test_password',
                                          password=password,
                                          **kwargs) is False:
        change = True
    if not change:
        ret['result'] = True
        ret['comment'] = 'user already present'
        return ret
    if __opts__['test']:
        ret['comment'] = 'would (re)create user'
        ret['result'] = None
        ret['changes'] = {'old': org_user, 'new': name}
        return ret
    __salt__['ipmi.ensure_user'](uid, name, password, channel, callback,
                                 link_auth, ipmi_msg, privilege_level,
                                 **kwargs)
    current_user = __salt__['ipmi.get_user'](uid=uid, channel=channel,
                                             **kwargs)
    ret['comment'] = '(re)created user'
    ret['result'] = True
    ret['changes'] = {'old': org_user, 'new': current_user}
    return ret
|
def end_headers(self):
    """Send the blank line ending the MIME headers.

    HTTP/0.9 has no headers, so nothing is buffered or flushed for it.
    """
    if self.request_version == 'HTTP/0.9':
        return
    self._headers_buffer.append(b"\r\n")
    self.flush_headers()
|
def get_unique_scan_parameter_combinations(meta_data_array, scan_parameters=None, scan_parameter_columns_only=False):
    '''Takes the numpy meta data array and returns the first rows with
    unique combinations of different scan parameter values for selected
    scan parameters. If ``scan_parameter_columns_only`` is true, the
    returned histogram only contains the selected columns.

    Parameters
    ----------
    meta_data_array : numpy.ndarray
    scan_parameters : list of string, None
        Scan parameter names taken. If None all are used.
    scan_parameter_columns_only : bool

    Returns
    -------
    numpy.Histogram
    '''
    names = meta_data_array.dtype.names
    try:
        # Interpreted meta_data carries an 'error_code' column...
        last_not_parameter_column = names.index('error_code')
    except ValueError:
        # ...raw data file meta_data carries 'error' instead.
        last_not_parameter_column = names.index('error')
    if last_not_parameter_column == len(names) - 1:
        # No scan parameter columns follow the error column.
        return
    if scan_parameters is None:
        return unique_row(meta_data_array,
                          use_columns=range(4, len(names)),
                          selected_columns_only=scan_parameter_columns_only)
    use_columns = []
    for scan_parameter in scan_parameters:
        try:
            use_columns.append(names.index(scan_parameter))
        except ValueError:
            logging.error('No scan parameter ' + scan_parameter + ' found')
            raise RuntimeError('Scan parameter not found')
    return unique_row(meta_data_array, use_columns=use_columns,
                      selected_columns_only=scan_parameter_columns_only)
|
def random(self):
    """Returns a random value that fits this column's parameters.

    Falls back to the range [0, 100] when minimum/maximum are unset.

    :return: <variant>
    """
    lower = self.minimum() or 0
    upper = self.maximum() or 100
    return random.randint(lower, upper)
|
def to_csv(self, path, mode=WRITE_MODE, dialect='excel', compression=None, newline='', **fmtparams):
    """Saves the sequence to a csv file. Each element should be an
    iterable which will be expanded to the elements of each row.

    :param path: path to write file
    :param mode: file open mode
    :param dialect: passed to csv.writer
    :param compression: optional compression scheme for the output file
    :param newline: newline handling; forced to None for binary modes
    :param fmtparams: passed to csv.writer
    """
    if 'b' in mode:
        # Binary modes must let csv control line endings itself.
        newline = None
    with universal_write_open(path, mode=mode, compression=compression,
                              newline=newline) as output:
        writer = csv.writer(output, dialect=dialect, **fmtparams)
        for row in self:
            writer.writerow([six.u(str(element)) for element in row])
|
def plot_ARD(kernel, filtering=None, legend=False, canvas=None, **kwargs):
    """If an ARD kernel is present, plot a bar representation using
    matplotlib.

    :param filtering: list of names, which to use for plotting ARD
        parameters. Only kernels which match names in the list of names
        in filtering will be used for plotting.
    :type filtering: list of names to use for ARD plot
    :param legend: whether to add a legend to the canvas.
    :param canvas: existing canvas to draw on; a new one is created when
        None.
    """
    Tango.reset()
    ard_params = np.atleast_2d(kernel.input_sensitivity(summarize=False))
    bottom = 0
    last_bottom = bottom
    x = np.arange(kernel._effective_input_dim)

    # Collect all non-combination leaf kernels.
    parts = []
    def visit(k):
        if (not isinstance(k, CombinationKernel)) and isinstance(k, Kern):
            parts.append(k)
    kernel.traverse(visit)

    if filtering is None:
        filtering = [k.name for k in parts]

    bars = []
    kwargs = update_not_existing_kwargs(kwargs, pl().defaults.ard)
    if canvas is None:
        canvas, kwargs = pl().new_canvas(
            xlim=(-.5, kernel._effective_input_dim - .5),
            xlabel='input dimension', ylabel='ard contribution', **kwargs)
    # Stack one bar series per (filtered) part.
    for i in range(ard_params.shape[0]):
        if parts[i].name in filtering:
            c = Tango.nextMedium()
            bars.append(pl().barplot(canvas, x, ard_params[i, :], color=c,
                                     label=parts[i].name, bottom=bottom,
                                     **kwargs))
            last_bottom = ard_params[i, :]
            bottom += last_bottom
        else:
            print("filtering out {}".format(parts[i].name))
    # add_bar_labels(fig, ax, [bars[-1]], bottom=bottom - last_bottom)
    return pl().add_to_canvas(canvas, bars, legend=legend)
|
def send_location(self, number, name, url, latitude, longitude):
    """Send a location message.

    :param str number: phone number with cc (country code)
    :param str name: identifier for the location
    :param str url: location url
    :param str latitude: location latitude
    :param str longitude: location longitude
    :return: the protocol entity that was sent downstream.
    """
    entity = LocationMediaMessageProtocolEntity(
        latitude, longitude, name, url, encoding="raw",
        to=self.normalize_jid(number))
    self.toLower(entity)
    return entity
|
def _load_certificate(location):
    """Load a certificate from the given location.

    Args:
        location (str): Either an HTTPS URL or an absolute file path. This
            is intended for PEM-encoded certificates and therefore assumes
            ASCII encoding.

    Returns:
        str: The PEM-encoded certificate as a unicode string.

    Raises:
        requests.exception.RequestException: Any exception requests could raise.
        IOError: If the location provided could not be opened and read.
    """
    if not location.startswith('https://'):
        _log.info('Loading local x509 certificate from %s', location)
        with open(location, 'rb') as cert_file:
            return cert_file.read().decode('ascii')
    _log.info('Downloading x509 certificate from %s', location)
    with requests.Session() as session:
        # Retry transient network failures up to three times.
        adapter = requests.adapters.HTTPAdapter(max_retries=3)
        session.mount('https://', adapter)
        response = session.get(location, timeout=30)
        response.raise_for_status()
        return response.text
|
def UpgradeReferenceFields ( ) :
    """Convert all ReferenceField values into UIDReferenceFields.

    These are not touched (HistoryAware, to be removed):
      - Analysis.Calculation: HistoryAwareReferenceField (rel=AnalysisCalculation)
      - DuplicateAnalysis.Calculation: HistoryAwareReferenceField (rel=DuplicateAnalysisCalculation)
      - RejectAnalysis.Calculation: HistoryAwareReferenceField (rel=RejectAnalysisCalculation)

    These are not touched (deprecated, will be removed):
      - AnalysisRequest.Profile: ReferenceField (rel=AnalysisRequestProfile)
      - LabContact.Department: ReferenceField (rel=LabContactDepartment)

    The remaining fields are listed in the table below.
    """
    # Change these carefully
    # they were made slowly with love
    # still they may be wrong .
    # Each entry pairs a portal_type with its (fieldname, relation) tuples,
    # where ``relation`` is the legacy AT reference relationship to migrate.
    for portal_type , fields in [ # portal _ type
    [ 'ARReport' , [ ( 'AnalysisRequest' , 'ARReportAnalysisRequest' ) ] ] , [ 'Analysis' , [ # AbstractBaseAnalysis
    ( 'Category' , 'AnalysisCategory' ) , ( 'Department' , 'AnalysisDepartment' ) , ( 'Instrument' , 'AnalysisInstrument' ) , ( 'Method' , 'AnalysisMethod' ) , # AbstractAnalysis
    ( 'AnalysisService' , 'AnalysisAnalysisService' ) , ( 'Attachment' , 'AnalysisAttachment' ) , # AbstractRoutineAnalysis
    ( 'OriginalReflexedAnalysis' , 'AnalysisOriginalReflexedAnalysis' ) , ( 'ReflexAnalysisOf' , 'AnalysisReflexAnalysisOf' ) , ( 'SamplePartition' , 'AnalysisSamplePartition' ) ] ] , [ 'ReferenceAnalysis' , [ # AbstractBaseAnalysis
    ( 'Category' , 'AnalysisCategory' ) , ( 'Department' , 'AnalysisDepartment' ) , ( 'Instrument' , 'AnalysisInstrument' ) , ( 'Method' , 'AnalysisMethod' ) , # AbstractAnalysis
    ( 'AnalysisService' , 'AnalysisAnalysisService' ) , ( 'Attachment' , 'AnalysisAttachment' ) , ] ] , [ 'DuplicateAnalysis' , [ # AbstractBaseAnalysis
    ( 'Category' , 'AnalysisCategory' ) , ( 'Department' , 'AnalysisDepartment' ) , ( 'Instrument' , 'AnalysisInstrument' ) , ( 'Method' , 'AnalysisMethod' ) , # AbstractAnalysis
    ( 'AnalysisService' , 'AnalysisAnalysisService' ) , ( 'Attachment' , 'AnalysisAttachment' ) , # AbstractRoutineAnalysis
    ( 'OriginalReflexedAnalysis' , 'AnalysisOriginalReflexedAnalysis' ) , ( 'ReflexAnalysisOf' , 'AnalysisReflexAnalysisOf' ) , ( 'SamplePartition' , 'AnalysisSamplePartition' ) , # DuplicateAnalysis
    ( 'Analysis' , 'DuplicateAnalysisAnalysis' ) , ] ] , [ 'AnalysisService' , [ # AbstractBaseAnalysis
    ( 'Category' , 'AnalysisCategory' ) , ( 'Department' , 'AnalysisDepartment' ) , ( 'Instrument' , 'AnalysisInstrument' ) , ( 'Method' , 'AnalysisMethod' ) , # AnalysisService
    ( 'Calculation' , 'AnalysisServiceCalculation' ) , ( 'Container' , 'AnalysisServiceContainer' ) , ( 'Instruments' , 'AnalysisServiceInstruments' ) , ( 'Methods' , 'AnalysisServiceMethods' ) , ( 'Preservation' , 'AnalysisServicePreservation' ) , # Backward compatibility with < 1.0.0
    ( 'Calculation' , 'AnalysisServiceCalculation' ) , ( 'Category' , 'AnalysisServiceAnalysisCategory' ) , ( 'Department' , 'AnalysisServiceDepartment' ) , ( 'Instrument' , 'AnalysisServiceInstrument' ) , ( 'Instruments' , 'AnalysisServiceInstruments' ) , ( 'Method' , 'AnalysisServiceMethod' ) , ( 'Methods' , 'AnalysisServiceMethods' ) , ( 'Preservation' , 'AnalysisServicePreservation' ) , ( 'Container' , 'AnalysisServiceContainer' ) , ] ] , [ 'AnalysisRequest' , [ ( 'Contact' , 'AnalysisRequestContact' ) , ( 'Sample' , 'AnalysisRequestSample' ) , ] ] , [ 'AnalysisSpec' , [ ( 'SampleType' , 'AnalysisSpecSampleType' ) ] ] , [ 'Calculation' , [ ( 'DependentServices' , 'CalculationDependentServices' ) , ( 'DependentServices' , 'CalculationAnalysisService' ) , ] ] , [ 'Instrument' , [ ( 'Analyses' , 'InstrumentAnalyses' ) , ( 'Method' , 'InstrumentMethod' ) , ] ] , [ 'Method' , [ ( 'Calculation' , 'MethodCalculation' ) , ] ] , [ 'SamplePartition' , [ ( 'Analyses' , 'SamplePartitionAnalysis' ) , ] ] , [ 'Worksheet' , [ ( 'WorksheetTemplate' , 'WorksheetAnalysisTemplate' ) ] ] ] :
        logger . info ( "Migrating references for portal_type: %s" % portal_type )
        for fieldname , relation in fields :
            # Only migrate fields whose schema already declares a UIDReferenceField.
            if is_UIDReferenceField ( portal_type , fieldname ) :
                migrate_refs ( portal_type , relation , fieldname )
    # remove at refs
    # NOTE(review): ``refs_to_remove`` and ``objs_to_reindex`` are not defined
    # in this block -- presumably module-level accumulators populated by
    # migrate_refs; confirm in the surrounding module.
    for remove in refs_to_remove :
        del_at_refs ( remove )
    # reindex objects touched by the migration
    for obj in objs_to_reindex :
        obj . reindexObject ( )
|
def get_value(self, sid, dt, field):
    """Retrieve the value at the given coordinates.

    Parameters
    ----------
    sid : int
        The asset identifier.
    dt : pd.Timestamp
        The timestamp for the desired data point.
    field : string
        The OHLVC name for the desired data point.

    Returns
    -------
    value : float|int
        The value at the given coordinates, ``float`` for OHLC, ``int``
        for 'volume'.

    Raises
    ------
    NoDataOnDate
        If the given dt is not a valid market minute (in minute mode) or
        session (in daily mode) according to this reader's trading calendar.
    NoDataForSid
        If the given sid is not valid.
    """
    # Resolve which per-country reader holds this asset; a ValueError from
    # the lookup means the asset is unknown to this reader.
    try:
        country = self._country_code_for_assets([sid])
    except ValueError as exc:
        raise_from(
            NoDataForSid(
                'Asset not contained in daily pricing file: {}'.format(sid)
            ),
            exc,
        )
    return self._readers[country].get_value(sid, dt, field)
|
def get_container_data(self):
    """Yield the base info dictionary for each container."""
    for container in self.get_containers():
        yield self.get_base_info(container)
|
def decode_data(response_content, data_type, entire_response=None):
    """Interpret downloaded data and return it.

    :param response_content: downloaded data (i.e. json, png, tiff, xml, zip, ... file)
    :type response_content: bytes
    :param data_type: expected downloaded data type
    :type data_type: constants.MimeType
    :param entire_response: A response obtained from execution of download request
    :type entire_response: requests.Response or dict or None
    :return: decoded data (numpy array for image types, parsed object otherwise)
    :raises: ValueError if the data type is not recognized
    """
    LOGGER.debug('data_type=%s', data_type)
    if data_type is MimeType.JSON:
        # Prefer requests' own decoding when the full response is available.
        if isinstance(entire_response, requests.Response):
            return entire_response.json()
        return json.loads(response_content.decode('utf-8'))
    if MimeType.is_image_format(data_type):
        return decode_image(response_content, data_type)
    if data_type is MimeType.XML or data_type is MimeType.GML or data_type is MimeType.SAFE:
        return ElementTree.fromstring(response_content)
    # Remaining types are passed through (optionally wrapped).
    passthrough = {
        MimeType.RAW: response_content,
        MimeType.TXT: response_content,
        MimeType.REQUESTS_RESPONSE: entire_response,
        MimeType.ZIP: BytesIO(response_content),
    }
    try:
        return passthrough[data_type]
    except KeyError:
        raise ValueError('Unknown response data type {}'.format(data_type))
|
def insertDict(self, tblname, d, fields=None):
    '''Insert a dictionary whose keys match the fieldnames of tblname.

    :param tblname: name of the target table
    :param d: mapping of column name -> value
    :param fields: optional explicit column order; defaults to sorted(d.keys())
    :raises Exception: wraps any database error after logging the failing SQL
    '''
    if fields is None:  # bug fix: was ``== None``
        fields = sorted(d.keys())
    # Initialize both so the except-branch guard cannot hit a NameError if
    # statement construction itself fails (original left SQL unbound).
    SQL = None
    values = None
    try:
        # Modernized from the removed ``string.join`` helper (Python 2 only).
        placeholders = ','.join(['%s'] * len(fields))
        SQL = 'INSERT INTO %s (%s) VALUES (%s)' % (tblname, ", ".join(fields), placeholders)
        values = tuple(d[k] for k in fields)
        self.locked_execute(SQL, parameters=values)
    except Exception as e:  # bug fix: ``except Exception, e`` is Python 2-only syntax
        if SQL and values:
            sys.stderr.write("\nSQL execution error in query '%s' %% %s at %s:" % (SQL, values, datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
        sys.stderr.write("\nError: '%s'.\n" % (str(e)))
        sys.stderr.flush()
        raise Exception("Error occurred during database insertion: '%s'." % str(e))
|
def _find_or_create_version ( self , product ) :
"""Create a Version in MetaDeploy if it doesn ' t already exist"""
|
tag = self . options [ "tag" ]
label = self . project_config . get_version_for_tag ( tag )
result = self . _call_api ( "GET" , "/versions" , params = { "product" : product [ "id" ] , "label" : label } )
if len ( result [ "data" ] ) == 0 :
version = self . _call_api ( "POST" , "/versions" , json = { "product" : product [ "url" ] , "label" : label , "description" : self . options . get ( "description" , "" ) , "is_production" : True , "commit_ish" : tag , "is_listed" : False , } , )
self . logger . info ( "Created {}" . format ( version [ "url" ] ) )
else :
version = result [ "data" ] [ 0 ]
self . logger . info ( "Found {}" . format ( version [ "url" ] ) )
return version
|
def real_main(start_url=None, ignore_prefixes=None, upload_build_id=None, upload_release_name=None):
    """Run the site_diff workflow to completion.

    Sets up a worker coordinator, enqueues the root SiteDiff workflow item,
    waits for it to finish, then shuts the coordinator down.
    """
    coordinator = workers.get_coordinator()
    fetch_worker.register(coordinator)
    coordinator.start()
    root_item = SiteDiff(
        start_url=start_url,
        ignore_prefixes=ignore_prefixes,
        upload_build_id=upload_build_id,
        upload_release_name=upload_release_name,
        heartbeat=workers.PrintWorkflow,
    )
    root_item.root = True
    coordinator.input_queue.put(root_item)
    coordinator.wait_one()
    coordinator.stop()
    coordinator.join()
|
def do_thaw(client, args):
    """Execute the thaw operation, pulling in an actual Vault client
    only when a GPG passphrase path is supplied."""
    if args.gpg_pass_path:
        vault_client = client.connect(args)
    else:
        vault_client = None
    aomi.filez.thaw(vault_client, args.icefile, args)
    sys.exit(0)
|
def file_handles(self) -> Iterable[IO[str]]:
    """Generate all file handles represented by the analysis.

    The callee owns each yielded handle; it is closed when the next one is
    yielded or when the generator ends.
    """
    if not self.file_handle:
        # No pre-opened handle: open each named file in turn, closing it
        # via the context manager once the consumer advances.
        for path in self.file_names():
            with open(path, "r") as handle:
                yield handle
        return
    handle = self.file_handle
    yield handle
    handle.close()
    self.file_handle = None
|
def build_query_string ( self , data ) :
    """Build the query string after dumping the data into the class.

    Args:
        data (dict): dictionary of all the query values

    Returns:
        data (dict): the same dict, with consumed keys removed and a new
        'query' key holding "<method>?<querystring>"
    """
    # "key=value" fragments accumulated for the query string.
    query = [ ]
    # Keys consumed into the query string; deleted from ``data`` afterwards.
    keys_to_be_removed = [ ]
    for key , value in data . items ( ) :
        # These metadata keys are kept on ``data`` and never serialized.
        if key not in [ 'version' , 'restApi' , 'resourcePath' ] :
            if not key == 'method' :
                if key == 'points' :
                    # 'points' is a sequence; serialize as comma-separated values.
                    value = ',' . join ( str ( val ) for val in value )
                    keys_to_be_removed . append ( key )
                query . append ( '{0}={1}' . format ( key , value ) )
                keys_to_be_removed . append ( key )
            # NOTE(review): the repeated appends are harmless -- the list is
            # de-duplicated via set() before the deletions below.
            keys_to_be_removed . append ( key )
    querystring = '&' . join ( query )
    # Final query is "<method>?<querystring>"; 'method' itself is removed after use.
    data [ 'query' ] = '{0}?{1}' . format ( data [ 'method' ] , querystring )
    for k in list ( set ( keys_to_be_removed ) ) :
        del data [ k ]
    return data
|
def reinit_on_backend_changes ( tf_bin , # pylint : disable = too - many - arguments
                               module_path , backend_options , env_name , env_region , env_vars ) :
    """Clean terraform directory and run init if necessary.

    If deploying a TF module to multiple regions (or any scenario requiring
    multiple backend configs), switching the backend will cause TF to
    compare the old and new backends. This will frequently cause an access
    error as the creds/role for the new backend won't always have access to
    the old one.

    This method compares the defined & initialized backend configs and
    trashes the terraform directory & re-inits if they're out of sync.
    """
    terraform_dir = os . path . join ( module_path , '.terraform' )
    local_tfstate_path = os . path . join ( terraform_dir , 'terraform.tfstate' )
    current_backend_config = { }
    desired_backend_config = { }
    LOGGER . debug ( 'Comparing previous & desired Terraform backend configs' )
    # The previously initialized backend config lives in the local tfstate.
    if os . path . isfile ( local_tfstate_path ) :
        with open ( local_tfstate_path , 'r' ) as stream :
            current_backend_config = hcl . load ( stream ) . get ( 'backend' , { } ) . get ( 'config' , { } )
    # Desired config comes from inline runway options, or a backend file.
    if backend_options . get ( 'config' ) :
        desired_backend_config = backend_options . get ( 'config' )
    elif os . path . isfile ( os . path . join ( module_path , backend_options . get ( 'filename' ) ) ) :
        with open ( os . path . join ( module_path , backend_options . get ( 'filename' ) ) , 'r' ) as stream :
            desired_backend_config = hcl . load ( stream )
    # Can ' t solely rely on the backend info defined in runway options or
    # backend files ; merge in the values defined in main . tf
    # ( or whatever tf file )
    # NOTE(review): the leading 'main.tf' entry is a bare relative path (not
    # joined with module_path) while the glob results are absolute -- confirm
    # whether the CWD is expected to be module_path here.
    for filename in [ 'main.tf' ] + glob . glob ( os . path . join ( module_path , '*.tf' ) ) :
        if os . path . isfile ( filename ) :
            with open ( filename , 'r' ) as stream :
                tf_config = hcl . load ( stream )
                if tf_config . get ( 'terraform' , { } ) . get ( 'backend' ) :
                    # Single-key dict destructure: the backend type maps to its config.
                    [ ( _s3key , tffile_backend_config ) ] = tf_config [ 'terraform' ] [ 'backend' ] . items ( )
                    # noqa pylint : disable = line - too - long
                    desired_backend_config = merge_dicts ( desired_backend_config , tffile_backend_config )
                    break
    if current_backend_config != desired_backend_config :
        LOGGER . info ( "Desired and previously initialized TF backend config is " "out of sync; trashing local TF state directory %s" , terraform_dir )
        send2trash ( terraform_dir )
        run_terraform_init ( tf_bin = tf_bin , module_path = module_path , backend_options = backend_options , env_name = env_name , env_region = env_region , env_vars = env_vars )
|
def cdata(self, data):
    """Print HTML cdata.

    @param data: the character data
    @type data: string
    @return: None
    """
    encoded = data.encode(self.encoding, "ignore")
    # NOTE(review): under Python 3 the %-interpolation of a bytes object
    # yields its repr (b'...'); this mirrors the original behavior, which
    # appears to have been written for Python 2.
    self.fd.write("<![CDATA[%s]]>" % encoded)
|
def ExportClientsByKeywords(keywords, filename, token=None):
    r"""A script to export client summaries selected by a keyword search.

    Performs a client search for machines matching all of keywords and
    writes a .csv summary of the results to filename. Multi-value fields
    are '\n' separated.

    Args:
      keywords: a list of keywords to search for
      filename: the name of the file to write to, will be replaced if
        already present
      token: datastore token.
    """
    index = client_index.CreateClientIndex(token=token)
    matching = index.LookupClients(keywords)
    logging.info("found %d clients", len(matching))
    if not matching:
        return
    columns = [
        u"client_id",
        u"hostname",
        u"last_seen",
        u"os",
        u"os_release",
        u"os_version",
        u"users",
        u"ips",
        u"macs",
    ]
    writer = csv.DictWriter(columns)
    writer.WriteHeader()
    for client in aff4.FACTORY.MultiOpen(matching, token=token):
        schema = client.Schema
        writer.WriteRow({
            u"client_id": client.urn.Basename(),
            u"hostname": client.Get(schema.HOSTNAME),
            u"os": client.Get(schema.SYSTEM),
            u"os_release": client.Get(schema.OS_RELEASE),
            u"os_version": client.Get(schema.OS_VERSION),
            u"ips": client.Get(schema.HOST_IPS),
            u"macs": client.Get(schema.MAC_ADDRESS),
            u"users": "\n".join(client.Get(schema.USERNAMES, [])),
            u"last_seen": client.Get(schema.PING),
        })
    # Write everything out in one shot once the rows are collected.
    with io.open(filename, "w") as csv_out:
        csv_out.write(writer.Content())
|
def activatePdpContextRequest(AccessPointName_presence=0, ProtocolConfigurationOptions_presence=0):
    """ACTIVATE PDP CONTEXT REQUEST Section 9.5.1

    Builds the mandatory information elements, then appends the optional
    Access Point Name and Protocol Configuration Options IEs when the
    corresponding *_presence flag is 1.
    """
    a = TpPd(pd=0x8)
    b = MessageType(mesType=0x41)  # 01000001
    c = NetworkServiceAccessPointIdentifier()
    d = LlcServiceAccessPointIdentifier()
    e = QualityOfService()
    f = PacketDataProtocolAddress()
    packet = a / b / c / d / e / f
    # Bug fix: ``is 1`` compared identity with an int literal, which relies
    # on CPython small-int caching and raises SyntaxWarning on Python >= 3.8.
    if AccessPointName_presence == 1:
        packet = packet / AccessPointName(ieiAPN=0x28)
    if ProtocolConfigurationOptions_presence == 1:
        packet = packet / ProtocolConfigurationOptions(ieiPCO=0x27)
    return packet
|
def addStep(self, key):
    """Record a new processing step.

    Stores the output of ``_ptime()`` (both the formatted and unformatted
    start time) under ``self.steps[key]['start']`` and appends the key to
    ``self.order``.
    """
    start_time = _ptime()
    print('==== Processing Step ', key, ' started at ', start_time[0])
    self.steps[key] = {'start': start_time}
    self.order.append(key)
|
def try_printout(data, out, opts, **kwargs):
    '''Safely get the string to print out: try the configured outputter,
    then fall back to nested and finally to raw.'''
    # Attempt the configured outputter first, then the 'nested' fallback.
    for attempt, outputter in enumerate((out, 'nested')):
        try:
            rendered = get_printout(outputter, opts)(data, **kwargs)
            if rendered is not None:
                return rendered.rstrip()
        except (KeyError, AttributeError, TypeError):
            if attempt == 0:
                log.debug(traceback.format_exc())
            else:
                log.error('Nested output failed: ', exc_info=True)
    # Last resort: the raw outputter.
    rendered = get_printout('raw', opts)(data, **kwargs)
    if rendered is not None:
        return rendered.rstrip()
|
def _get_db_password ( dbSystem , db , user ) :
"""Read through the users . dbrc file to get password for the db / user
combination suplied . If no password is found then prompt for one"""
|
import string , getpass , os
dbrc = os . environ [ 'HOME' ] + "/.dbrc"
password = { }
if os . access ( dbrc , os . R_OK ) :
fd = open ( dbrc )
lines = fd . readlines ( )
for line in lines :
entry = line . split ( )
if entry [ 0 ] == dbSystem and entry [ 1 ] == db and entry [ 2 ] == user :
return entry [ 3 ]
return getpass . getpass ( )
|
def find ( self , _limit = None , _offset = 0 , _step = 5000 , order_by = "id" , return_count = False , ** _filter ) :
    """Perform a simple search on the table. Simply pass keyword arguments
    as ``filter``.
    ::

        results = table.find(country='France')
        results = table.find(country='France', year=1980)

    Using ``_limit``::

        # just return the first 10 rows
        results = table.find(country='France', _limit=10)

    You can sort the results by single or multiple columns. Append a minus
    sign to the column name for descending order::

        # sort results by a column 'year'
        results = table.find(country='France', order_by='year')
        # return all rows sorted by multiple columns (by year in descending order)
        results = table.find(order_by=['country', '-year'])

    By default :py:meth:`find() <dataset.Table.find>` will break the query
    into chunks of ``_step`` rows to prevent huge tables from being loaded
    into memory at once.

    For more complex queries, please use :py:meth:`db.query()` instead.
    """
    self . _check_dropped ( )
    if not isinstance ( order_by , ( list , tuple ) ) :
        order_by = [ order_by ]
    # Keep only ordering columns that actually exist on the table
    # (stripping a leading '-' used to request descending order).
    order_by = [ o for o in order_by if ( o . startswith ( "-" ) and o [ 1 : ] or o ) in self . table . columns ]
    order_by = [ self . _args_to_order_by ( o ) for o in order_by ]
    args = self . _args_to_clause ( _filter )
    # query total number of rows first
    count_query = alias ( self . table . select ( whereclause = args , limit = _limit , offset = _offset ) , name = "count_query_alias" , ) . count ( )
    rp = self . engine . execute ( count_query )
    total_row_count = rp . fetchone ( ) [ 0 ]
    if return_count :
        return total_row_count
    if _limit is None :
        _limit = total_row_count
    # A falsy step disables chunking: fetch everything in one query.
    if _step is None or _step is False or _step == 0 :
        _step = total_row_count
    # Without an ORDER BY, paging with OFFSET is not deterministic,
    # so fall back to a single unchunked query.
    if total_row_count > _step and not order_by :
        _step = total_row_count
        log . warn ( "query cannot be broken into smaller sections because it is unordered" )
    queries = [ ]
    # Build one SELECT per chunk; the final chunk is clipped to _limit.
    for i in count ( ) :
        qoffset = _offset + ( _step * i )
        qlimit = min ( _limit - ( _step * i ) , _step )
        if qlimit <= 0 :
            break
        queries . append ( self . table . select ( whereclause = args , limit = qlimit , offset = qoffset , order_by = order_by ) )
    # Execute lazily: each chunk's query runs only as the iterator advances.
    return ResultIter ( ( self . engine . execute ( q ) for q in queries ) , row_type = self . db . row_type )
|
def casefold(s, fullcasefold=True, useturkicmapping=False):
    """Perform case folding on *s* for caseless comparisons.

    The input string must be of type 'unicode', otherwise a TypeError will
    be raised. For more information on case folding, see section 3.13 of
    the Unicode Standard and https://unicode.org/faq/casemap_charprop.htm

    By default, full case folding (where the string length may change) is
    done; set fullcasefold=False for simple (single-character) case folding.

    By default, case folding does not handle the Turkic case of dotted vs
    dotless 'i'. Pass useturkicmapping=True to use the special Turkic
    mappings; see https://en.wikipedia.org/wiki/Dotted_and_dotless_I and
    http://www.i18nguy.com/unicode/turkish-i18n.html#problem

    :param s: String to transform
    :param fullcasefold: Boolean, full case fold (default True) vs simple.
    :param useturkicmapping: Boolean, use the special Turkic mapping
        (default False) for the dotted and dotless 'i'.
    :return: Copy of string transformed for caseless comparison.
    """
    if not isinstance(s, six.text_type):
        raise TypeError(u"String to casefold must be of type 'unicode'!")
    # "C" prefix selects the common mappings; "F"/"S" pick full vs simple,
    # and a leading "T" switches in the Turkic-specific mappings.
    lookup_order = "CF" if fullcasefold else "CS"
    if useturkicmapping:
        lookup_order = "T" + lookup_order
    folded = [
        casefold_map.lookup(ch, lookup_order=lookup_order)
        for ch in preservesurrogates(s)
    ]
    return u"".join(folded)
|
def open(cls, path):
    """Load an image file into a PIX object.

    Leptonica can load TIFF, PNM (PBM, PGM, PPM), PNG, and JPEG. If
    loading fails then the object will wrap a C null pointer.
    """
    name = fspath(path)
    with _LeptonicaErrorTrap():
        encoded = os.fsencode(name)
        return cls(lept.pixRead(encoded))
|
def pandoc ( script = None , input = None , output = None , args = '{input:q} --output {output:q}' , ** kwargs ) :
    '''Convert input file to output using pandoc.

    The input can be specified in three ways:

    1. an instant script, which is assumed to be in md format::

        pandoc:  output='report.html'
          script

    2. one or more input files. The format is determined by the extension
       of the input file::

        pandoc(input, output='report.html')

    3. an input file specified by command line option `-r`::

        pandoc(output='report.html')

    If no output is specified, it is assumed to be in html format and is
    written to standard output.

    You can specify more options such as "from" and "to" by customizing
    the ``args`` parameter of the action. The default value of ``args`` is
    `{input:q} --output {output:q}`.
    '''
    # # this is output format
    # pandoc [ OPTIONS ] [ FILES ]
    # Input formats : commonmark , docbook , docx , epub , haddock , html , json * , latex ,
    # markdown , markdown _ github , markdown _ mmd , markdown _ phpextra ,
    # markdown _ strict , mediawiki , native , odt , opml , org , rst , t2t ,
    # textile , twiki
    # [ * only Pandoc ' s JSON version of native AST ]
    # Output formats : asciidoc , beamer , commonmark , context , docbook , docx , dokuwiki ,
    # dzslides , epub , epub3 , fb2 , haddock , html , html5 , icml , json * ,
    # latex , man , markdown , markdown _ github , markdown _ mmd ,
    # markdown _ phpextra , markdown _ strict , mediawiki , native , odt ,
    # opendocument , opml , org , pdf * * , plain , revealjs , rst , rtf , s5,
    # slideous , slidy , tei , texinfo , textile
    # [ * * for pdf output , use latex or beamer and - o FILENAME . pdf ]
    # Options :
    # - f FORMAT , - r FORMAT - - from = FORMAT , - - read = FORMAT
    # - t FORMAT , - w FORMAT - - to = FORMAT , - - write = FORMAT
    # - o FILENAME - - output = FILENAME
    # - - data - dir = DIRECTORY
    # - R - - parse - raw
    # - S - - smart
    # IGNORED
    if not executable ( 'pandoc' ) . target_exists ( ) :
        raise RuntimeError ( 'pandoc not found' )
    # Collect input from the instant script and/or the provided files.
    input = sos_targets ( collect_input ( script , input ) )
    output = sos_targets ( output )
    if len ( output ) == 0 :
        # No output requested: render to a temp html file and echo it later.
        write_to_stdout = True
        output = sos_targets ( tempfile . NamedTemporaryFile ( mode = 'w+t' , suffix = '.html' , delete = False ) . name )
    else :
        write_to_stdout = False
    ret = 1
    try :
        p = None
        cmd = interpolate ( f'pandoc {args}' , { 'input' : input , 'output' : output } )
        if 'ACTION' in env . config [ 'SOS_DEBUG' ] or 'ALL' in env . config [ 'SOS_DEBUG' ] :
            env . log_to_file ( 'ACTION' , f'Running command "{cmd}"' )
        if env . config [ 'run_mode' ] == 'interactive' : # need to catch output and send to python output , which will in turn be hijacked by SoS notebook
            from . utils import pexpect_run
            ret = pexpect_run ( cmd )
        else :
            p = subprocess . Popen ( cmd , shell = True )
            ret = p . wait ( )
    except Exception as e :
        env . logger . error ( e )
    if ret != 0 :
        # Preserve the failing input under .sos so the user can reproduce.
        temp_file = os . path . join ( '.sos' , f'pandoc_{os.getpid()}.md' )
        shutil . copyfile ( input , temp_file )
        cmd = interpolate ( f'pandoc {args}' , { 'input' : sos_targets ( temp_file ) , 'output' : sos_targets ( output ) } )
        raise RuntimeError ( f'Failed to execute script. Please use command \n{cmd}\nunder {os.getcwd()} to test it.' )
    if write_to_stdout :
        with open ( output [ 0 ] . fullname ( ) ) as out :
            sys . stdout . write ( out . read ( ) )
    else :
        env . logger . info ( f'Report saved to {output}' )
    # Best-effort cleanup of the collected input.
    # NOTE(review): ``input`` is a sos_targets object here, not a plain path;
    # os.remove on it presumably raises and is swallowed -- confirm intent.
    try :
        os . remove ( input )
    except Exception :
        pass
|
def write(self):
    """Write a csv file of resolved names and a txt file of unresolved names.

    Resolved names go to ``search_results.csv`` (one row per key, columns
    taken from ``self.key_terms``); keys with no results are listed, one
    per line, in ``unresolved.txt``.
    """
    csv_path = os.path.join(self.outdir, 'search_results.csv')
    txt_path = os.path.join(self.outdir, 'unresolved.txt')
    headers = self.key_terms
    unresolved = []
    with open(csv_path, 'w') as csv_handle:
        writer = csv.writer(csv_handle)
        writer.writerow(headers)
        for name in list(self._store.keys()):
            results = self._store[name]
            if not results:
                unresolved.append(name)
                continue
            # First result wins; pull each remaining header's field from it.
            # (GNR returns UTF-8; modern csv handles that directly, so no
            # ascii re-encoding is performed.)
            row = [name] + [results[0][term] for term in headers[1:]]
            writer.writerow(row)
    if unresolved:
        with open(txt_path, 'w') as txt_handle:
            for name in unresolved:
                txt_handle.write("{0}\n".format(name))
|
def whitespace_around_operator(logical_line):
    r"""Avoid extraneous whitespace around an operator.

    Okay: a = 12 + 3
    E221: a = 4  + 5
    E222: a = 4 +  5
    E223: a = 4 \t+ 5
    E224: a = 4 +\t5
    """
    checks = (
        (1, "E223 tab before operator", "E221 multiple spaces before operator"),
        (2, "E224 tab after operator", "E222 multiple spaces after operator"),
    )
    for match in OPERATOR_REGEX.finditer(logical_line):
        groups = match.groups()
        for group_index, tab_msg, spaces_msg in checks:
            text = groups[group_index - 1]
            if '\t' in text:
                yield match.start(group_index), tab_msg
            elif len(text) > 1:
                yield match.start(group_index), spaces_msg
|
def open(self, data_source, *args, **kwargs):
    """Open filename to get data for data_source.

    :param data_source: Data source for which the file contains data.
    :type data_source: str

    Positional and keyword arguments can contain either the data to use
    for the data source or the full path of the file which contains data
    for the data source.
    """
    if self.sources[data_source]._meta.data_reader.is_file_reader:
        # Keyword defaults, overridden positionally in the order
        # (filename, path, rel_path).
        filename = kwargs.get('filename')
        path = kwargs.get('path', '')
        rel_path = kwargs.get('rel_path', '')
        overrides = dict(zip(('filename', 'path', 'rel_path'), args))
        filename = overrides.get('filename', filename)
        path = overrides.get('path', path)
        rel_path = overrides.get('rel_path', rel_path)
        # The data-source constructor receives only the joined filename.
        args = ()
        kwargs = {'filename': os.path.join(rel_path, path, filename)}
        LOGGER.debug('filename: %s', kwargs['filename'])
    # call constructor of data source with filename argument
    self.objects[data_source] = self.sources[data_source](*args, **kwargs)
    # register data and uncertainty in registry
    data_src_obj = self.objects[data_source]
    meta = [getattr(data_src_obj, name) for name in self.reg.meta_names]
    self.reg.register(data_src_obj.data, *meta)
|
def queue_context_entry(exchange, queue_name, routing=None):
    """Form a queue's context entry.

    When *routing* is omitted, the queue name doubles as the routing key.
    """
    routing_key = queue_name if routing is None else routing
    return QueueContextEntry(
        mq_queue=queue_name,
        mq_exchange=exchange,
        mq_routing_key=routing_key,
    )
|
def basis_functions(degree, knot_vector, spans, knots):
    """Compute the non-vanishing basis functions for a list of parameters.

    :param degree: degree, :math:`p`
    :type degree: int
    :param knot_vector: knot vector, :math:`U`
    :type knot_vector: list, tuple
    :param spans: list of knot spans
    :type spans: list, tuple
    :param knots: list of knots or parameters
    :type knots: list, tuple
    :return: basis functions
    :rtype: list
    """
    # One basis-function evaluation per (span, knot) pair, in order.
    return [
        basis_function(degree, knot_vector, span, knot)
        for span, knot in zip(spans, knots)
    ]
|
def syllabify(word):
    '''Syllabify the given word, whether simplex or complex.'''
    # A non-alphabetic word (e.g. containing a separator) is treated as compound.
    is_compound = not word.isalpha()
    strategy = _syllabify_complex if is_compound else _syllabify_simplex
    variants = list(strategy(word))
    if len(variants) > 1:
        # order variants from most preferred to least preferred
        variants = rank(variants)
    for variant, rules in variants:
        yield _post_process(variant, rules)
|
def extract_function_metadata(wrapped, instance, args, kwargs, return_value):
    """Stash the `args` and `kwargs` into the metadata of the subsegment."""
    LOGGER.debug('Extracting function call metadata', args=args, kwargs=kwargs)
    call_info = {'args': args, 'kwargs': kwargs}
    return {'metadata': call_info}
|
def index_layer ( self , layer_id , use_cache = False ) :
    """Index a layer in the search backend.

    If ``use_cache`` is set, the layer id is appended to a cached set to be
    synced later; otherwise the transaction is sent right away. The cache
    path needs memcached to be available.

    :param layer_id: primary key of the aggregator Layer to index
    :param use_cache: defer indexing by accumulating ids in the cache
    """
    from hypermap . aggregator . models import Layer
    layer = Layer . objects . get ( id = layer_id )
    # Invalid or deleted layers are removed from the index instead.
    if not layer . is_valid :
        LOGGER . debug ( 'Not indexing or removing layer with id %s in search engine as it is not valid' % layer . id )
        unindex_layer ( layer . id , use_cache )
        return
    if layer . was_deleted :
        LOGGER . debug ( 'Not indexing or removing layer with id %s in search engine as was_deleted is true' % layer . id )
        unindex_layer ( layer . id , use_cache )
        return
    # 1 . if we use cache
    if use_cache :
        LOGGER . debug ( 'Caching layer with id %s for syncing with search engine' % layer . id )
        layers = cache . get ( 'layers' )
        if layers is None :
            layers = set ( [ layer . id ] )
        else :
            layers . add ( layer . id )
        cache . set ( 'layers' , layers )
        return
    # 2 . if we don ' t use cache
    # TODO : Make this function more DRY
    # by abstracting the common bits .
    if SEARCH_TYPE == 'solr' :
        from hypermap . aggregator . solr import SolrHypermap
        LOGGER . debug ( 'Syncing layer %s to solr' % layer . name )
        solrobject = SolrHypermap ( )
        success , message = solrobject . layer_to_solr ( layer )
        # update the error message if using celery
        # NOTE(review): ``self.update_state``/``states``/``Ignore`` suggest this
        # runs as a celery task -- confirm the task binding in the module.
        if not settings . REGISTRY_SKIP_CELERY :
            if not success :
                self . update_state ( state = states . FAILURE , meta = message )
                raise Ignore ( )
    elif SEARCH_TYPE == 'elasticsearch' :
        from hypermap . aggregator . elasticsearch_client import ESHypermap
        LOGGER . debug ( 'Syncing layer %s to es' % layer . name )
        esobject = ESHypermap ( )
        success , message = esobject . layer_to_es ( layer )
        # update the error message if using celery
        if not settings . REGISTRY_SKIP_CELERY :
            if not success :
                self . update_state ( state = states . FAILURE , meta = message )
                raise Ignore ( )
|
def log_to_file(logdir, mode='a', delete=False, clearmem=True):
    """Like :class:`log_to()`, but automatically creates a new FileLogger
    instead of having one passed.

    Note that the logger stays alive (in memory) forever. If you need
    to control the lifetime of a logger, use :class:`log_to()` instead.
    """
    file_logger = FileLogger(logdir, mode, delete, clearmem)
    # Keep a module-level reference so the logger is never garbage collected.
    _loggers.append(file_logger)
    return log_to(file_logger)
|
def get_track_info(track_id):
    """Fetches track info from Soundcloud, given a track_id"""
    logger.info('Retrieving more info on the track')
    endpoint = url["trackinfo"].format(track_id)
    response = requests.get(endpoint, params={'client_id': CLIENT_ID}, stream=True)
    track = response.json()
    logger.debug(track)
    return track
|
def _set_ipv6_routes(self, v, load=False):
    """Setter method for ipv6_routes, mapped from YANG variable /isis_state/ipv6_routes (container)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_ipv6_routes is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_ipv6_routes() directly.

    YANG Description: ISIS IPv6 Route Table
    """
    # Unwrap values that carry their original (user-supplied) type.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the value into the generated YANG container class; this
        # validates the type and wires up path helpers and extensions.
        t = YANGDynClass(v, base=ipv6_routes.ipv6_routes, is_container='container', presence=False, yang_name="ipv6-routes", rest_name="ipv6-routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-ipv6-route-table', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """ipv6_routes must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=ipv6_routes.ipv6_routes, is_container='container', presence=False, yang_name="ipv6-routes", rest_name="ipv6-routes", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-ipv6-route-table', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""", })
    self.__ipv6_routes = t
    # Propagate the change if the generated base class supports notification.
    if hasattr(self, '_set'):
        self._set()
|
def get_appapi_params(self, prepay_id, timestamp=None, nonce_str=None):
    """Build the signed parameter set for WeChat APP payment.

    :param prepay_id: the ``prepay_id`` returned by the unified-order API
    :param timestamp: optional timestamp; defaults to the current time
    :param nonce_str: optional random string; auto-generated by default
    :return: dict of payment parameters including the signature
    """
    params = {
        'appid': self.appid,
        'partnerid': self.mch_id,
        'prepayid': prepay_id,
        'package': 'Sign=WXPay',
        'timestamp': timestamp or to_text(int(time.time())),
        'noncestr': nonce_str or random_string(32),
    }
    params['sign'] = calculate_signature(params, self._client.api_key)
    return params
|
def find_column(t):
    """Return the 1-based column of token *t*, based on the previous newline.

    :param t: a PLY token whose ``lexer`` exposes ``lexpos`` and ``lexdata``
    :return: column number (first column is 1)
    """
    pos = t.lexer.lexpos
    data = t.lexer.lexdata
    # str.rfind already returns -1 when no newline precedes pos, which is
    # exactly the sentinel needed for the subtraction below to yield a
    # 1-based column, so no extra normalization is required.
    last_cr = data.rfind('\n', 0, pos)
    return pos - last_cr
|
def AddPerformanceOptions(self, argument_group):
    """Adds the performance options to the argument group.

    Args:
      argument_group (argparse._ArgumentGroup): argparse argument group.
    """
    argument_group.add_argument(
        '--buffer_size', '--buffer-size', '--bs', dest='buffer_size',
        action='store', default=0,
        help='The buffer size for the output (defaults to 196MiB).')
    queue_size_help = (
        'The maximum number of queued items per worker '
        '(defaults to {0:d})').format(self._DEFAULT_QUEUE_SIZE)
    argument_group.add_argument(
        '--queue_size', '--queue-size', dest='queue_size', action='store',
        default=0, help=queue_size_help)
|
def get_mutation(self, stage, data):
    '''Get the next mutation, if in the correct stage.

    :param stage: current stage of the stack
    :param data: a dictionary of items to pass to the model
    :return: mutated payload if in appropriate stage, None otherwise
    '''
    payload = None
    # Commented out for now: we want to return the same
    # payload - while inside the same test
    # if self._keep_running() and self._do_fuzz.is_set():
    if self._keep_running():
        # Node at the current position along the fuzzed path.
        fuzz_node = self._fuzz_path[self._index_in_path].dst
        if self._should_fuzz_node(fuzz_node, stage):
            fuzz_node.set_session_data(data)
            # Render the mutated payload as raw bytes and remember it.
            payload = fuzz_node.render().tobytes()
            self._last_payload = payload
        else:
            # Not this node's stage: advance along the path instead.
            self._update_path_index(stage)
    if payload:
        self._notify_mutated()
    # Record every (stage, payload) request, mutated or not, for reporting.
    self._requested_stages.append((stage, payload))
    return payload
|
def dvnorm(state):
    """Function to calculate the derivative of the norm of a 3-vector.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/dvnorm_c.html

    :param state:
        A 6-vector composed of three coordinates and their derivatives.
    :type state: 6-Element Array of floats
    :return: The derivative of the norm of a 3-vector.
    :rtype: float
    """
    # Use == rather than `is`: identity comparison on small ints is a
    # CPython implementation detail and emits a SyntaxWarning on 3.8+.
    assert len(state) == 6
    state = stypes.toDoubleVector(state)
    return libspice.dvnorm_c(state)
|
def _do_customer_service(self):
    """This method is called before the shutdown of the scheduler.

    If customer_service is on and the flow didn't complete successfully,
    a lightweight tarball file with inputs and the most important output
    files is created in customer_service_dir.
    """
    if self.customer_service_dir is None:
        return
    doit = self.exceptions or not self.flow.all_ok
    # NOTE(review): the original code unconditionally overrides the condition
    # above; preserved here so behavior is unchanged -- confirm whether the
    # tarball should really be produced even when the flow completed ok.
    doit = True
    if not doit:
        return
    prefix = os.path.basename(self.flow.workdir) + "_"
    import tempfile, datetime
    suffix = str(datetime.datetime.now()).replace(" ", "-")
    # Strip the fractional seconds from the timestamp, if present.
    # str.find returns -1 when "." is absent; the original used str.index,
    # which raises ValueError when microseconds happen to be exactly zero.
    i = suffix.find(".")
    if i != -1:
        suffix = suffix[:i]
    suffix += ".tar.gz"
    _, tmpname = tempfile.mkstemp(suffix="_" + suffix, prefix=prefix,
                                  dir=self.customer_service_dir.path, text=False)
    print("Dear customer,\n We are about to generate a tarball in\n %s" % tmpname)
    self.flow.make_light_tarfile(name=tmpname)
|
def almost_identity(gate: Gate) -> bool:
    """Return true if gate tensor is (almost) the identity"""
    dim = 2 ** gate.qubit_nb
    operator = asarray(gate.asoperator())
    return np.allclose(operator, np.eye(dim))
|
def sort_dict_by_key(obj):
    """Sort dict by its keys

    >>> sort_dict_by_key(dict(c=1, b=2, a=3, d=4))
    OrderedDict([('a', 3), ('b', 2), ('c', 1), ('d', 4)])
    """
    # Dict keys are unique, so sorting the (key, value) pairs orders by key
    # alone -- tuple comparison never needs to fall through to the values.
    ordered_items = sorted(obj.items())
    return OrderedDict(ordered_items)
|
def load_dataset(dataset_key, force_update=False, auto_update=False, profile='default', **kwargs):
    """Load a dataset from the local filesystem, downloading it from data.world
    first, if necessary.

    This function returns an object of type `LocalDataset`. The object
    allows access to metadata via its `describe()` method and to all the data
    via three properties `raw_data`, `tables` and `dataframes`, all of which
    are mappings (dict-like structures).

    :param dataset_key: Dataset identifier, in the form of owner/id or of a url
    :type dataset_key: str
    :param force_update: Flag, indicating if a new copy of the dataset should
        be downloaded replacing any previously downloaded copy
        (Default value = False)
    :type force_update: bool
    :param auto_update: Flag, indicating that dataset be updated to the latest
        version
    :type auto_update: bool
    :param profile: Configuration profile (account) to use.
        (Default value = 'default')
    :type profile: str, optional
    :returns: The object representing the dataset
    :rtype: LocalDataset
    :raises RestApiError: If a server error occurs

    Examples
    --------
    >>> import datadotworld as dw
    >>> dataset = dw.load_dataset('jonloyens/an-intro-to-dataworld-dataset')
    >>> list(dataset.dataframes)
    ['changelog', 'datadotworldbballstats', 'datadotworldbballteam']
    """
    # Delegate to the client instance configured for the requested profile.
    client = _get_instance(profile, **kwargs)
    return client.load_dataset(dataset_key, force_update=force_update,
                               auto_update=auto_update)
|
def get_by_name(self, name: str) -> List[Account]:
    """Searches accounts by name"""
    # Delegate to the recursive search starting at the book's root account.
    root_account = self.book.root
    return self.get_by_name_from(root_account, name)
|
def add_batch_parser(subparsers, parent_parser):
    """Adds arguments parsers for the batch list, batch show and batch status
    commands

    Args:
        subparsers: Add parsers to this subparser object
        parent_parser: The parent argparse.ArgumentParser object
    """
    batch_parser = subparsers.add_parser(
        'batch',
        help='Displays information about batches and submit new batches',
        description='Provides subcommands to display Batch information and '
                    'submit Batches to the validator via the REST API.')
    batch_subparsers = batch_parser.add_subparsers(
        title='subcommands', dest='subcommand')
    batch_subparsers.required = True
    # Register every batch subcommand on the new subparser group.
    for register_subcommand in (add_batch_list_parser, add_batch_show_parser,
                                add_batch_status_parser,
                                add_batch_submit_parser):
        register_subcommand(batch_subparsers, parent_parser)
|
def getCachedDataKey(engineVersionHash, key):
    """Retrieves the cached data value for the specified engine version hash and dictionary key"""
    cacheFile = CachedDataManager._cacheFileForHash(engineVersionHash)
    manager = JsonDataManager(cacheFile)
    return manager.getKey(key)
|
def send_email(sender, receivers, subject, text=None, html=None, charset='utf-8', config=Injected):
    """Sends an email.

    :param sender: Sender as string or None for default got from config.
    :param receivers: String or array of recipients.
    :param subject: Subject.
    :param text: Plain text message.
    :param html: Html message.
    :param charset: Charset.
    :param config: Current configuration
    :raises Exception: if neither ``text`` nor ``html`` is given
    """
    smtp_config = config['SMTP']
    # Receivers must be an array.
    if not isinstance(receivers, (list, tuple)):
        receivers = [receivers]
    # Create the message parts.
    msgs = []
    if text is not None:
        msgs.append(MIMEText(text, 'plain', charset))
    if html is not None:
        msgs.append(MIMEText(html, 'html', charset))
    if not msgs:
        raise Exception("No message is given.")
    if len(msgs) == 1:
        msg = msgs[0]
    else:
        # Both text and html given: wrap them in a multipart container.
        msg = MIMEMultipart()
        for part in msgs:
            msg.attach(part)
    # Default sender.
    if sender is None:
        sender = smtp_config['SENDER']
    # Fill the envelope info.
    msg['Subject'] = subject
    msg['From'] = sender
    msg['To'] = ", ".join(receivers)
    # Send. Always close the connection, even when sendmail raises
    # (the original leaked the connection on failure).
    smtp_server = smtplib.SMTP(**(smtp_config['SERVER']))
    try:
        smtp_server.sendmail(sender, receivers, msg.as_string())
    finally:
        smtp_server.quit()
|
def search_reference_sets(self, accession=None, md5checksum=None, assembly_id=None):
    """Returns an iterator over the ReferenceSets fulfilling the specified
    conditions.

    :param str accession: If not null, return the reference sets for which
        the `accession` matches this string (case-sensitive, exact match).
    :param str md5checksum: If not null, return the reference sets for
        which the `md5checksum` matches this string (case-sensitive, exact
        match). See :class:`ga4gh.protocol.ReferenceSet::md5checksum` for
        details.
    :param str assembly_id: If not null, return the reference sets for
        which the `assembly_id` matches this string (case-sensitive,
        exact match).
    :return: An iterator over the :class:`ga4gh.protocol.ReferenceSet`
        objects defined by the query parameters.
    """
    # Empty protobuf strings denote "no filter" for the optional criteria.
    fields = {
        'accession': pb.string(accession),
        'md5checksum': pb.string(md5checksum),
        'assembly_id': pb.string(assembly_id),
        'page_size': pb.int(self._page_size),
    }
    request = protocol.SearchReferenceSetsRequest()
    for attr, value in fields.items():
        setattr(request, attr, value)
    return self._run_search_request(
        request, "referencesets", protocol.SearchReferenceSetsResponse)
|
def render_impl(template_file, ctx=None, paths=None, filters=None):
    """Render the given template file with the template engine.

    :param template_file: Absolute or relative path to the template file
    :param ctx: Context dict needed to instantiate templates
    :param paths: Template search paths
    :param filters: Custom filters to add into template engine
    :return: Compiled result (str)
    """
    env = tmpl_env(make_template_paths(template_file, paths))
    if env is None:
        # No template engine available: return the raw file content.
        # Use a context manager so the handle is closed (the original
        # leaked the open file object).
        with copen(template_file) as fileobj:
            return fileobj.read()
    if filters is not None:
        env.filters.update(filters)
    if ctx is None:
        ctx = {}
    return env.get_template(os.path.basename(template_file)).render(**ctx)
|
def producer(id, message_count=16):
    """Spam the bus with messages including the data id.

    :param int id: the id of the thread/process
    """
    with can.Bus(bustype='socketcan', channel='vcan0') as bus:
        for seq in range(message_count):
            frame = can.Message(arbitration_id=0x0cf02200 + id,
                                data=[id, seq, 0, 1, 3, 1, 4, 1])
            bus.send(frame)
            # Pace the bus: one frame per second.
            sleep(1.0)
    print("Producer #{} finished sending {} messages".format(id, message_count))
|
def get_first_row(dbconn, tablename, n=1, uuid=None):
    """Returns the first `n` rows in the table"""
    # end=False selects rows from the start of the table.
    first_rows = fetch(dbconn, tablename, n, uuid, end=False)
    return first_rows
|
def _set_prefixes ( self , conf ) :
"""Set the graphite key prefixes
: param dict conf : The configuration data"""
|
if conf . get ( 'legacy_namespace' , 'y' ) in self . TRUE_VALUES :
self . count_prefix = 'stats_counts'
self . count_suffix = ''
self . gauge_prefix = 'stats.gauges'
self . timer_prefix = 'stats.timers'
self . rate_prefix = 'stats'
self . rate_suffix = ''
else :
global_prefix = conf . get ( 'global_prefix' , 'stats' )
self . count_prefix = '%s.%s' % ( global_prefix , conf . get ( 'prefix_counter' , 'counters' ) )
self . count_suffix = '.count'
self . gauge_prefix = '%s.%s' % ( global_prefix , conf . get ( 'prefix_gauge' , 'gauges' ) )
self . timer_prefix = '%s.%s' % ( global_prefix , conf . get ( 'prefix_timer' , 'timers' ) )
self . rate_prefix = self . count_prefix
self . rate_suffix = '.rate'
|
def upload(self, bug: Bug) -> bool:
    """Attempts to upload the Docker image for a given bug to
    `DockerHub <https://hub.docker.com>`_.
    """
    build_manager = self.__installation.build
    return build_manager.upload(bug.image)
|
def woa_profile_from_dap(var, d, lat, lon, depth, cfg):
    """Monthly Climatologic Mean and Standard Deviation from WOA,
    used either for temperature or salinity.

    INPUTS
        time: [day of the year]
        lat: [-90 < lat < 90]
        lon: [-180 < lon < 180]
        depth: [meters]

    Reads the WOA Monthly Climatology NetCDF file and
    returns the corresponding WOA values of salinity or temperature mean and
    standard deviation for the given time, lat, lon, depth.
    """
    # WOA longitudes run 0..360; normalize western-hemisphere inputs.
    if lon < 0:
        lon = lon + 360
    url = cfg['url']
    # Day of the year for the requested date.
    doy = int(d.strftime('%j'))
    dataset = open_url(url)
    # Nearest-neighbour indices along the time, lon and lat axes.
    dn = (np.abs(doy - dataset['time'][:])).argmin()
    xn = (np.abs(lon - dataset['lon'][:])).argmin()
    yn = (np.abs(lat - dataset['lat'][:])).argmin()
    if re.match("temperature\d?$", var):
        # Mean and standard-deviation profiles, masked at the fill value.
        mn = ma.masked_values(dataset.t_mn.t_mn[dn, :, yn, xn].reshape(dataset['depth'].shape[0]), dataset.t_mn.attributes['_FillValue'])
        sd = ma.masked_values(dataset.t_sd.t_sd[dn, :, yn, xn].reshape(dataset['depth'].shape[0]), dataset.t_sd.attributes['_FillValue'])
        # se = ma.masked_values(dataset.t_se.t_se[dn, :, yn, xn].reshape(
        #     dataset['depth'].shape[0]), dataset.t_se.attributes['_FillValue'])
        # Use this in the future. A minimum # of samples
        # dd = ma.masked_values(dataset.t_dd.t_dd[dn, :, yn, xn].reshape(
        #     dataset['depth'].shape[0]), dataset.t_dd.attributes['_FillValue'])
    elif re.match("salinity\d?$", var):
        mn = ma.masked_values(dataset.s_mn.s_mn[dn, :, yn, xn].reshape(dataset['depth'].shape[0]), dataset.s_mn.attributes['_FillValue'])
        sd = ma.masked_values(dataset.s_sd.s_sd[dn, :, yn, xn].reshape(dataset['depth'].shape[0]), dataset.s_sd.attributes['_FillValue'])
        # dd = ma.masked_values(dataset.s_dd.s_dd[dn, :, yn, xn].reshape(
        #     dataset['depth'].shape[0]), dataset.s_dd.attributes['_FillValue'])
    zwoa = ma.array(dataset.depth[:])
    # Only interpolate at requested depths inside the WOA depth range;
    # depths outside the range stay masked in the output.
    ind = (depth <= zwoa.max()) & (depth >= zwoa.min())
    # Mean value profile
    f = interp1d(zwoa[~ma.getmaskarray(mn)].compressed(), mn.compressed())
    mn_interp = ma.masked_all(depth.shape)
    mn_interp[ind] = f(depth[ind])
    # The stdev profile
    f = interp1d(zwoa[~ma.getmaskarray(sd)].compressed(), sd.compressed())
    sd_interp = ma.masked_all(depth.shape)
    sd_interp[ind] = f(depth[ind])
    output = {'woa_an': mn_interp, 'woa_sd': sd_interp}
    return output
|
def fold_xor(bloomfilter,  # type: bitarray
             folds  # type: int
             ):
    # type: (...) -> bitarray
    """Performs XOR folding on a Bloom filter.

    If the length of the original Bloom filter is n and we perform
    r folds, then the length of the resulting filter is n / 2**r.

    :param bloomfilter: Bloom filter to fold
    :param folds: number of folds
    :return: folded bloom filter
    """
    if len(bloomfilter) % 2 ** folds != 0:
        msg = ('The length of the bloom filter is {length}. It is not '
               'divisible by 2 ** {folds}, so it cannot be folded {folds} '
               'times.'.format(length=len(bloomfilter), folds=folds))
        raise ValueError(msg)
    for _ in range(folds):
        half = len(bloomfilter) // 2
        # XOR the two halves together, halving the filter's length.
        bloomfilter = bloomfilter[:half] ^ bloomfilter[half:]
    return bloomfilter
|
def _ask_to_confirm(self, ui, pac_man, *to_install):
    """Return True if user wants to install packages, False otherwise"""
    prompt = pac_man.get_perm_prompt(to_install)
    answer = DialogHelper.ask_for_package_list_confirm(
        ui, prompt=prompt, package_list=to_install)
    return bool(answer)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.