signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _with_context(self, *args, **kwargs):
    """As the ``with_context`` class method, but for recordsets.

    Builds a new context from an optional positional base mapping (or the
    current ``self.env.context`` when none is given), overridden by any
    keyword arguments, and returns the recordset bound to that context.
    """
    base = args[0] if args else self.env.context
    merged = dict(base)
    merged.update(kwargs)  # keyword arguments take precedence over the base
    return self.with_env(self.env(context=merged))
def _convert_weekday_pattern ( p_weekday ) :
"""Converts a weekday name to an absolute date .
When today ' s day of the week is entered , it will return next week ' s date .""" | day_value = { 'mo' : 0 , 'tu' : 1 , 'we' : 2 , 'th' : 3 , 'fr' : 4 , 'sa' : 5 , 'su' : 6 }
target_day_string = p_weekday [ : 2 ] . lower ( )
target_day = day_value [ target_day_string ]
day = date . today ( ) . weekday ( )
shift = 7 - ( day - target_day ) % 7
return date . today ( ) + timedelta ( shift ) |
def do_export(self, subcmd, opts, *args):
    """Create an unversioned copy of a tree.

    usage:
        1. export [-r REV] URL [PATH]
        2. export [-r REV] PATH1 [PATH2]

    1. Exports a clean directory tree from the repository specified by
       URL, at revision REV if it is given, otherwise at HEAD, into
       PATH. If PATH is omitted, the last component of the URL is used
       for the local directory name.
    2. Exports a clean directory tree from the working copy specified by
       PATH1, at revision REV if it is given, otherwise at WORKING, into
       PATH2. If PATH2 is omitted, the last component of the PATH1 is used
       for the local directory name. If REV is not specified, all local
       changes will be preserved, but files not under version control will
       not be copied.

    ${cmd_option_list}
    """
    # Fix: the original used the Python-2-only print statement; the
    # single-argument parenthesized form below behaves identically on
    # Python 2 and Python 3.
    print("'svn %s' opts: %s" % (subcmd, opts))
    print("'svn %s' args: %s" % (subcmd, args))
def _parse_udf_descriptors(self):
    # type: () -> None
    '''An internal method to parse the UDF descriptors on the ISO.  This
    should only be called if the ISO has a valid UDF Volume Recognition
    Sequence at the beginning of the ISO.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    block_size = self.pvd.logical_block_size()
    # Parse the anchors.  Anchor Volume Descriptor Pointers are read from
    # two spots: byte offset 256 * block_size from the start, and the last
    # 2048 bytes of the ISO.
    anchor_locations = [(256 * block_size, os.SEEK_SET), (-2048, os.SEEK_END)]
    for loc, whence in anchor_locations:
        self._cdfp.seek(loc, whence)
        extent = self._cdfp.tell() // 2048
        anchor_data = self._cdfp.read(2048)
        anchor_tag = udfmod.UDFTag()
        anchor_tag.parse(anchor_data, extent)
        # Tag identifier 2 marks an Anchor Volume Descriptor Pointer.
        if anchor_tag.tag_ident != 2:
            raise pycdlibexception.PyCdlibInvalidISO('UDF Anchor Tag identifier not 2')
        anchor = udfmod.UDFAnchorVolumeStructure()
        anchor.parse(anchor_data, extent, anchor_tag)
        self.udf_anchors.append(anchor)
    # Parse the Main Volume Descriptor Sequence
    self._parse_udf_vol_descs(self.udf_anchors[0].main_vd_extent, self.udf_anchors[0].main_vd_length, self.udf_main_descs)
    # Parse the Reserve Volume Descriptor Sequence
    self._parse_udf_vol_descs(self.udf_anchors[0].reserve_vd_extent, self.udf_anchors[0].reserve_vd_length, self.udf_reserve_descs)
    # Parse the Logical Volume Integrity Sequence
    self._seek_to_extent(self.udf_main_descs.logical_volume.integrity_sequence_extent)
    integrity_data = self._cdfp.read(self.udf_main_descs.logical_volume.integrity_sequence_length)
    offset = 0
    current_extent = self.udf_main_descs.logical_volume.integrity_sequence_extent
    desc_tag = udfmod.UDFTag()
    desc_tag.parse(integrity_data[offset:], current_extent)
    # Tag identifier 9 marks a Logical Volume Integrity Descriptor.
    if desc_tag.tag_ident != 9:
        raise pycdlibexception.PyCdlibInvalidISO('UDF Volume Integrity Tag identifier not 9')
    self.udf_logical_volume_integrity.parse(integrity_data[offset:offset + 512], current_extent, desc_tag)
    # Advance one logical block to the integrity sequence terminator.
    offset += block_size
    current_extent += 1
    desc_tag = udfmod.UDFTag()
    desc_tag.parse(integrity_data[offset:], current_extent)
    # Tag identifier 8 marks a Terminating Descriptor.
    if desc_tag.tag_ident != 8:
        raise pycdlibexception.PyCdlibInvalidISO('UDF Logical Volume Integrity Terminator Tag identifier not 8')
    self.udf_logical_volume_integrity_terminator.parse(current_extent, desc_tag)
    # Now look for the File Set Descriptor
    current_extent = self.udf_main_descs.partition.part_start_location
    self._seek_to_extent(current_extent)
    # Read the data for the File Set and File Terminator together
    file_set_and_term_data = self._cdfp.read(2 * block_size)
    desc_tag = udfmod.UDFTag()
    # NOTE(review): parsed against extent 0 here (and against the
    # partition-relative extent below) -- presumably because tag locations
    # inside a partition are partition-relative; confirm against udfmod.
    desc_tag.parse(file_set_and_term_data[:block_size], 0)
    # Tag identifier 256 marks a File Set Descriptor.
    if desc_tag.tag_ident != 256:
        raise pycdlibexception.PyCdlibInvalidISO('UDF File Set Tag identifier not 256')
    self.udf_file_set.parse(file_set_and_term_data[:block_size], current_extent, desc_tag)
    current_extent += 1
    desc_tag = udfmod.UDFTag()
    desc_tag.parse(file_set_and_term_data[block_size:], current_extent - self.udf_main_descs.partition.part_start_location)
    if desc_tag.tag_ident != 8:
        raise pycdlibexception.PyCdlibInvalidISO('UDF File Set Terminator Tag identifier not 8')
    self.udf_file_set_terminator.parse(current_extent, desc_tag)
def can_edit(self, user):
    """Return whether or not `user` can make changes to the class.

    Admins can always edit; otherwise the class must be unlocked and the
    user must administer it.
    """
    if user.is_admin:
        # Short-circuit exactly like the original `or` expression.
        return user.is_admin
    return not self.is_locked and self in user.admin_for
def avail_locations(call=None):
    '''Return available Linode datacenter locations.

    CLI Example:

    .. code-block:: bash

        salt-cloud --list-locations my-linode-config
        salt-cloud -f avail_locations my-linode-config
    '''
    if call == 'action':
        raise SaltCloudException('The avail_locations function must be called with -f or --function.')
    datacenters = _query('avail', 'datacenters')
    # Key each datacenter record by its LOCATION string.
    return {entry['LOCATION']: entry for entry in datacenters['DATA']}
def _create_dictionary_of_ned_d(self):
    """Create a list of dictionaries containing all the rows in the ned_d
    catalogue.

    **Return:**
        - ``dictList`` -- a list of dictionaries containing all the rows
          in the ned_d catalogue.

    NOTE(review): Python 2 code -- uses the print statement and opens the
    CSV in 'rb' mode; confirm the runtime before porting.
    """
    self.log.debug('starting the ``_create_dictionary_of_ned_d`` method')
    count = 0
    # First pass: count the rows so progress can be reported.
    with open(self.pathToDataFile, 'rb') as csvFile:
        csvReader = csv.reader(csvFile, dialect='excel', delimiter=',', quotechar='"')
        totalRows = sum(1 for row in csvReader)
    csvFile.close()
    totalCount = totalRows
    # Second pass: map the header row to column keys, then build dicts.
    with open(self.pathToDataFile, 'rb') as csvFile:
        csvReader = csv.reader(csvFile, dialect='excel', delimiter=',', quotechar='"')
        theseKeys = []
        dictList = []
        for row in csvReader:
            if len(theseKeys) == 0:
                # Still hunting for the header row; rows before it do not
                # count towards the data total.
                totalRows -= 1
                if "Exclusion Code" in row and "Hubble const." in row:
                    # Header found: translate catalogue column names to
                    # database-friendly keys (unknown columns kept as-is).
                    for i in row:
                        if i == "redshift (z)":
                            theseKeys.append("redshift")
                        elif i == "Hubble const.":
                            theseKeys.append("hubble_const")
                        elif i == "G":
                            theseKeys.append("galaxy_index_id")
                        elif i == "err":
                            theseKeys.append("dist_mod_err")
                        elif i == "D (Mpc)":
                            theseKeys.append("dist_mpc")
                        elif i == "Date (Yr. - 1980)":
                            theseKeys.append("ref_date")
                        elif i == "REFCODE":
                            theseKeys.append("ref")
                        elif i == "Exclusion Code":
                            theseKeys.append("dist_in_ned_flag")
                        elif i == "Adopted LMC modulus":
                            theseKeys.append("lmc_mod")
                        elif i == "m-M":
                            theseKeys.append("dist_mod")
                        elif i == "Notes":
                            theseKeys.append("notes")
                        elif i == "SN ID":
                            theseKeys.append("dist_derived_from_sn")
                        elif i == "method":
                            theseKeys.append("dist_method")
                        elif i == "Galaxy ID":
                            theseKeys.append("primary_ned_id")
                        elif i == "D":
                            theseKeys.append("dist_index_id")
                        else:
                            theseKeys.append(i)
                continue
            if len(theseKeys):
                count += 1
                if count > 1:
                    # Cursor up one line and clear line
                    sys.stdout.write("\x1b[1A\x1b[2K")
                if count > totalCount:
                    count = totalCount
                percent = (float(count) / float(totalCount)) * 100.
                print "%(count)s / %(totalCount)s (%(percent)1.1f%%) rows added to memory" % locals()
                rowDict = {}
                for t, r in zip(theseKeys, row):
                    rowDict[t] = r
                    if t == "ref_date":
                        # Dates are stored as offsets from 1980; unparsable
                        # values become None.
                        try:
                            rowDict[t] = int(r) + 1980
                        except:
                            rowDict[t] = None
                # 999999 is a sentinel for rows without a distance index.
                if rowDict["dist_index_id"] != "999999":
                    dictList.append(rowDict)
    csvFile.close()
    self.log.debug('completed the ``_create_dictionary_of_ned_d`` method')
    return dictList
def from_config(cls, name, config):
    """Return a Configurable instance with the given name and config.

    By default this is a simple matter of calling the constructor, but
    subclasses that are also `Pluggable` instances override this in order
    to check that the plugin is installed correctly first.
    """
    cls.validate_config(config)
    instance = cls()
    # Prefer a name the instance already carries; otherwise take it from
    # the config, falling back to the supplied `name`.
    if not instance.name:
        instance.name = config.get("name", name)
    instance.apply_config(config)
    return instance
def _make_path(*parts):
    """Create a URL string from parts, omitting all `None` values and empty
    strings; lists and tuples are converted to comma separated values."""
    # TODO: maybe only allow some parts to be lists/tuples?
    kept = (p for p in parts if p not in SKIP_IN_PATH)
    # preserve ',' and '*' in the url for nicer URLs in logs
    encoded = [quote_plus(_escape(p), b",*") for p in kept]
    return "/" + "/".join(encoded)
def offset_gaussian(data):
    """Fit a gaussian model to `data` and return its center."""
    nbins = 2 * int(np.ceil(np.sqrt(data.size)))
    lo, hi = data.min(), data.max()
    # Histogram over a range padded by half the data span on each side.
    half_span = (hi - lo) / 2
    counts, edges = np.histogram(data, nbins, density=True,
                                 range=(lo - half_span, hi + half_span))
    # Shift the upper bin edges back by half a bin width to get centers.
    half_width = abs(edges[1] - edges[2]) / 2
    centers = edges[1:] - half_width
    model = lmfit.models.GaussianModel()
    initial = model.guess(counts, x=centers)
    fit = model.fit(counts, initial, x=centers)
    return fit.params["center"]
def _update_dprx(self):
    """Update `dprx` (derivatives of codon frequencies with respect to the
    `zeta` free parameters, when `zeta` is free)."""
    super(ExpCM_fitprefs, self)._update_dprx()
    # j is a flat index over the (site r, amino-acid i) zeta parameters.
    j = 0
    if 'zeta' in self.freeparams:
        self.dprx['zeta'].fill(0)
        # NOTE(review): the loop nesting below was reconstructed from
        # flattened source; j is assumed to advance once per (r, i) pair,
        # after the inner loop over a -- confirm against the original.
        for r in range(self.nsites):
            for i in range(N_AA - 1):
                zetari = self.zeta[j]
                for a in range(i, N_AA):
                    # Indicator over codons whose translation is amino acid a.
                    delta_aAx = (CODON_TO_AA == a).astype('float')
                    self.dprx['zeta'][j][r] += (delta_aAx - (delta_aAx * self.prx[r]).sum()) / (zetari - int(i == a))
                self.dprx['zeta'][j] *= self.prx[r]
                j += 1
def create(gandi, name, datacenter, subnet, gateway, background):
    """Create a new vlan.

    Warns (but proceeds) when the chosen datacenter is scheduled to close.
    """
    try:
        gandi.datacenter.is_opened(datacenter, 'iaas')
    except DatacenterLimited as exc:
        warning = ('/!\\ Datacenter %s will be closed on %s, '
                   'please consider using another datacenter.'
                   % (datacenter, exc.date))
        gandi.echo(warning)
    result = gandi.vlan.create(name, datacenter, subnet, gateway, background)
    if background:
        gandi.pretty_echo(result)
    return result
def vcard(self, qs):
    """VCARD format.

    Serializes each entry of `qs` (mappings with at least 'first_name',
    'last_name' and 'email' keys) as a vCard to stdout.  Exits with
    status 1 if the optional `vobject` dependency is not installed.
    """
    try:
        import vobject
    except ImportError:
        print(self.style.ERROR("Please install vobject to use the vcard export format."))
        sys.exit(1)
    out = sys.stdout
    for ent in qs:
        card = vobject.vCard()
        card.add('fn').value = full_name(**ent)
        if not ent['last_name'] and not ent['first_name']:
            # fallback to fullname, if both first and lastname are not declared
            card.add('n').value = vobject.vcard.Name(full_name(**ent))
        else:
            card.add('n').value = vobject.vcard.Name(ent['last_name'], ent['first_name'])
        emailpart = card.add('email')
        emailpart.value = ent['email']
        emailpart.type_param = 'INTERNET'
        out.write(card.serialize())
def create_aws_lambda(ctx, bucket, region_name, aws_access_key_id, aws_secret_access_key):
    """Creates an AWS Chalice project for deployment to AWS Lambda."""
    # Imported lazily and aliased to avoid shadowing this command wrapper.
    from canari.commands.create_aws_lambda import create_aws_lambda as _impl
    _impl(ctx.project, bucket, region_name, aws_access_key_id, aws_secret_access_key)
def slackpkg_update(self):
    """Replace the slackpkg ChangeLog.txt file with a fresh copy fetched
    from the official Slackware mirrors after a distribution update.

    The previous ChangeLog.txt (if any) is preserved as ChangeLog.txt.old.
    """
    new_changelog = URL(mirrors("ChangeLog.txt", "")).reading()
    old_path = self.meta.slackpkg_lib_path + "ChangeLog.txt.old"
    cur_path = self.meta.slackpkg_lib_path + "ChangeLog.txt"
    if os.path.isfile(old_path):
        os.remove(old_path)
    if os.path.isfile(cur_path):
        # Keep one generation of history before replacing the file.
        shutil.copy2(cur_path, old_path)
        os.remove(cur_path)
    # Fix: the original called log.close() inside the `with` block, which
    # is redundant -- the context manager already closes the file.
    with open(cur_path, "w") as log:
        log.write(new_changelog)
def push(self):
    """Returns a callable that can be used to transmit a message in a
    push-pull fashion.

    Note that the sender function has a ``print`` like signature, with an
    infinite number of arguments.  Each one being a part of the complete
    message.

    :rtype: function
    """
    push_socket = self.__sock(zmq.PUSH)
    return self.__send_function(push_socket)
def add_libravatar(generator, metadata):
    """Article generator connector for the Libravatar plugin.

    Adds an ``author_libravatar`` URL to the article metadata, built from
    the article's Email header (falling back to the AUTHOR_EMAIL setting),
    honoring the LIBRAVATAR_MISSING and LIBRAVATAR_SIZE settings.
    """
    missing = generator.settings.get('LIBRAVATAR_MISSING')
    size = generator.settings.get('LIBRAVATAR_SIZE')
    # Fall back to AUTHOR_EMAIL when the article has no Email header.
    # (dict.get returns None when the setting is absent, so the key is
    # always present afterwards.)
    if 'email' not in metadata:
        metadata['email'] = generator.settings.get('AUTHOR_EMAIL')
    if metadata['email']:
        # Compose the URL using the MD5 hash of the lowercased address
        # (the ascii encoding is necessary for Python 3).
        email = metadata['email'].lower().encode('ascii')
        md5 = hashlib.md5(email).hexdigest()
        url = 'http://cdn.libravatar.org/avatar/' + md5
        # Build the query string from the optional parameters.
        # Fix: the original emitted a malformed '?&s=...' query when only
        # LIBRAVATAR_SIZE was set (the '&' was appended unconditionally).
        params = []
        if missing:
            params.append('d=' + missing)
        if size:
            params.append('s=' + str(size))
        if params:
            url = url + '?' + '&'.join(params)
        # Add the URL to the article's metadata.
        metadata['author_libravatar'] = url
def inventory(self, modules_inventory=False):
    """Get chassis inventory.

    :param modules_inventory: True - read modules inventory,
        False - don't read.
    """
    self.c_info = self.get_attributes()
    # One entry per slot in the whitespace-separated port-count list;
    # slots with a zero port count are skipped.
    for slot, port_count in enumerate(self.c_info['c_portcounts'].split()):
        if not int(port_count):
            continue
        module = XenaModule(parent=self, index=slot)
        if modules_inventory:
            module.inventory()
def get_key_words(s, max_words=50, weighted=False):
    """Determines key words in Chinese text *s*.

    The key words are returned in a list. If *weighted* is ``True``,
    then each list item is a tuple: ``(word, weight)``, where *weight*
    is a float. If it's ``False``, then each list item is a string.

    This uses the function :func:`~pynlpir.nlpir.GetKeyWords` to determine
    the key words in *s*.

    :param s: The Chinese text to analyze. *s* should be Unicode or a
        UTF-8 encoded string.
    :param int max_words: The maximum number of key words to find
        (defaults to ``50``).
    :param bool weighted: Whether or not to return the key words' weights
        (defaults to ``False``).
    """
    s = _decode(s)
    logger.debug("Searching for up to {}{} key words in: {}.".format(max_words, ' weighted' if weighted else '', s))
    result = nlpir.GetKeyWords(_encode(s), max_words, weighted)
    result = _decode(result)
    logger.debug("Finished key word search: {}.".format(result))
    logger.debug("Formatting key word search results.")
    # Results arrive as a '#'-delimited string; an empty result yields [].
    fresult = result.strip('#').split('#') if result else []
    if weighted:
        weights, words = [], []
        for w in fresult:
            # NOTE(review): each item is assumed to be '/'-delimited with
            # the word at index 0 and the weight at index 2 -- confirm
            # against the NLPIR output format.
            result = w.split('/')
            word, weight = result[0], result[2]
            weight = _to_float(weight)
            # Unparsable weights fall back to 0.0.
            weights.append(weight or 0.0)
            words.append(word)
        fresult = zip(words, weights)
        if is_python3:
            # Return a list instead of a zip object in Python 3.
            fresult = list(fresult)
    logger.debug("Key words formatted: {}.".format(fresult))
    return fresult
def transform(self):
    """Return a new transformed matrix.

    NOTE(review): despite the name, this performs a transpose -- each
    entry at (col_i, row_i) of self.matrix is written to (row_i, col_i)
    of the result.  The result is allocated as
    ``Matrix(self._rows, self._columns)``; for a non-square matrix a
    transpose would normally need the dimensions swapped -- confirm
    against the Matrix constructor's argument order.

    :return: Returns a new transformed Matrix
    :rtype: Matrix
    """
    t_matrix = Matrix(self._rows, self._columns)
    for col_i, col in enumerate(self.matrix):
        for row_i, entry in enumerate(col):
            # Swap the two indices when writing into the result.
            t_matrix.set_value(row_i, col_i, entry)
    return t_matrix
def _pdf(self, xloc, left, right, cache):
    """Probability density function for a multiplication operator.

    Exactly one of ``left``/``right`` must resolve to a distribution;
    the other is a numeric constant (or matrix when ``self.matrix``).

    Example:
        >>> print(chaospy.Uniform().pdf([-0.5, 0.5, 1.5, 2.5]))
        [0. 1. 0. 0.]
        >>> print(Mul(chaospy.Uniform(), 2).pdf([-0.5, 0.5, 1.5, 2.5]))
        [0.  0.5 0.5 0. ]
        >>> print(Mul(2, chaospy.Uniform()).pdf([-0.5, 0.5, 1.5, 2.5]))
        [0.  0.5 0.5 0. ]
        >>> print(Mul(1, 1.5).pdf([-0.5, 0.5, 1.5, 2.5]))  # Dirac logic
        [ 0.  0. inf  0.]
        >>> dist = chaospy.Mul([2, 1], chaospy.Iid(chaospy.Uniform(), 2))
        >>> print(dist.pdf([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]]))
        [0.5 0.5 0. ]
        >>> dist = chaospy.Mul(chaospy.Iid(chaospy.Uniform(), 2), [1, 2])
        >>> print(dist.pdf([[0.5, 0.6, 1.5], [0.5, 0.6, 1.5]]))
        [0.5 0.5 0. ]
    """
    # Resolve cached forward evaluations (constants stay constants).
    left = evaluation.get_forward_cache(left, cache)
    right = evaluation.get_forward_cache(right, cache)
    if isinstance(left, Dist):
        if isinstance(right, Dist):
            # Both sides stochastic: the product density is not defined here.
            raise evaluation.DependencyError("under-defined distribution {} or {}".format(left, right))
    elif not isinstance(right, Dist):
        # Both sides constant: Dirac-style density.
        return numpy.inf
    else:
        # left is a constant, right is the distribution: change of
        # variables x -> x / left, with the density scaled by 1/left.
        if self.matrix:
            Ci = numpy.linalg.inv(left)
            xloc = numpy.dot(Ci, xloc)
        else:
            left = (numpy.asfarray(left).T + numpy.zeros(xloc.shape).T).T
            valids = left != 0
            xloc.T[valids.T] = xloc.T[valids.T] / left.T[valids.T]
        pdf = evaluation.evaluate_density(right, xloc, cache=cache)
        if self.matrix:
            pdf = numpy.dot(Ci, pdf)
        else:
            pdf.T[valids.T] /= left.T[valids.T]
        return pdf
    # left is the distribution, right is a constant: symmetric case.
    if self.matrix:
        Ci = numpy.linalg.inv(right)
        xloc = numpy.dot(xloc.T, Ci).T
    else:
        right = (numpy.asfarray(right).T + numpy.zeros(xloc.shape).T).T
        valids = right != 0
        xloc.T[valids.T] = xloc.T[valids.T] / right.T[valids.T]
        # Zero multipliers map every location outside the support.
        xloc.T[~valids.T] = numpy.inf
    pdf = evaluation.evaluate_density(left, xloc, cache=cache)
    if self.matrix:
        pdf = numpy.dot(pdf.T, Ci).T
    else:
        pdf.T[valids.T] /= right.T[valids.T]
    assert pdf.shape == xloc.shape
    return pdf
def get_volumes(self):
    """Gets a list of all volumes in this disk, including volumes that are
    contained in other volumes."""
    collected = []
    # Each top-level volume reports itself plus any nested volumes.
    for volume in self.volumes:
        collected += volume.get_volumes()
    return collected
def user_line(self, frame, breakpoint_hits=None):
    """This function is called when we stop or break at this line.

    When breakpoints were hit, their attached commands run first; the
    commands decide whether the interactive prompt is entered and whether
    the stack entry is printed.
    """
    if not breakpoint_hits:
        # Plain step/stop: go straight to the interactive prompt.
        self.interaction(frame, None)
    else:
        commands_result = self.bp_commands(frame, breakpoint_hits)
        if not commands_result:
            self.interaction(frame, None)
        else:
            doprompt, silent = commands_result
            if not silent:
                self.print_stack_entry(self.stack[self.curindex])
            if doprompt:
                self._cmdloop()
            # Drop cached frame/stack state after handling the stop.
            self.forget()
def tee_output_python():
    """Duplicate sys.stdout and sys.stderr to a new StringIO.

    Yields a CapturedStdout wrapping the shared buffer; on exit the
    original streams are restored and the capture is finalized.

    NOTE(review): this is a generator function -- presumably wrapped with
    ``@contextmanager`` at its original definition site; confirm.
    """
    buffer = StringIO()
    out = CapturedStdout(buffer)
    orig_stdout, orig_stderr = sys.stdout, sys.stderr
    # Flush before swapping so pending output is not captured.
    flush()
    sys.stdout = TeeingStreamProxy(sys.stdout, buffer)
    sys.stderr = TeeingStreamProxy(sys.stderr, buffer)
    try:
        yield out
    finally:
        flush()
        out.finalize()
        # Always restore the original streams, even on error.
        sys.stdout, sys.stderr = orig_stdout, orig_stderr
def compile_initial_state(self, batch_size: Optional[int] = None) -> Sequence[tf.Tensor]:
    '''Returns a tuple of tensors representing the initial state fluents.

    Args:
        batch_size (Optional[int]): The batch size.

    Returns:
        Sequence[tf.Tensor]: A tuple of tensors.
    '''
    with self.graph.as_default():
        with tf.name_scope('initial_state'):
            self._initialize_initial_state_fluents()
            # Without a batch size, return the fluents as-is.
            if batch_size is not None:
                return self._compile_batch_fluents(self.initial_state_fluents, batch_size)
            return self.initial_state_fluents
def interfaces(self):
    """Get the list of networks on the compute (cached after first fetch)."""
    if self._interfaces_cache:
        return self._interfaces_cache
    # Coroutine-style fetch; the JSON payload is memoized.
    response = yield from self.get("/network/interfaces")
    self._interfaces_cache = response.json
    return self._interfaces_cache
def get_example_from_prop_spec(self, prop_spec, from_allof=False):
    """Return an example value from a property specification.

    The checks below are ordered by priority: explicit examples/defaults,
    enums, $ref definitions, allOf, untyped (complex) specs, objects,
    arrays, files, date-times, type lists, and finally basic types.

    Args:
        prop_spec: the specification of the property.
        from_allof: whether these properties are part of an allOf section.

    Returns:
        An example value.
    """
    # Read example directly from (X-)Example or Default value
    easy_keys = ['example', 'x-example', 'default']
    for key in easy_keys:
        if key in prop_spec.keys() and self.use_example:
            return prop_spec[key]
    # Enum: the first member serves as the example
    if 'enum' in prop_spec.keys():
        return prop_spec['enum'][0]
    # From definition
    if '$ref' in prop_spec.keys():
        return self._example_from_definition(prop_spec)
    # Process AllOf section
    if 'allOf' in prop_spec.keys():
        return self._example_from_allof(prop_spec)
    # Complex type (no explicit 'type' key)
    if 'type' not in prop_spec:
        return self._example_from_complex_def(prop_spec)
    # Object - read from properties, without references
    if prop_spec['type'] == 'object':
        example, additional_properties = self._get_example_from_properties(prop_spec)
        # NOTE(review): plain objects are wrapped in a single-element list
        # unless from an allOf or with additionalProperties -- presumably
        # a convention of the surrounding example generator; confirm.
        if additional_properties or from_allof:
            return example
        return [example]
    # Array (either 'array' directly or as the first of a type list)
    if prop_spec['type'] == 'array' or (isinstance(prop_spec['type'], list) and prop_spec['type'][0] == 'array'):
        return self._example_from_array_spec(prop_spec)
    # File: a (stream, filename) tuple
    if prop_spec['type'] == 'file':
        return (StringIO('my file contents'), 'hello world.txt')
    # Date time
    if 'format' in prop_spec.keys() and prop_spec['format'] == 'date-time':
        return self._get_example_from_basic_type('datetime')[0]
    # List of types: use the first one
    if isinstance(prop_spec['type'], list):
        return self._get_example_from_basic_type(prop_spec['type'][0])[0]
    # Default - basic type
    logging.info("falling back to basic type, no other match found")
    return self._get_example_from_basic_type(prop_spec['type'])[0]
def creep_data(data_set='creep_rupture'):
    """Brun and Yoshida's metal creep rupture data.

    Downloads the dataset if not already available, extracts the tarball
    in place, and returns a dict with 'X' (features) and 'y' (target)
    arrays via data_details_return.
    """
    if not data_available(data_set):
        download_data(data_set)
    path = os.path.join(data_path, data_set)
    tar_file = os.path.join(path, 'creeprupt.tar')
    tar = tarfile.open(tar_file)
    print('Extracting file.')
    tar.extractall(path=path)
    tar.close()
    all_data = np.loadtxt(os.path.join(data_path, data_set, 'taka'))
    # Column 1 is the target; column 0 plus columns 2-30 are features.
    y = all_data[:, 1:2].copy()
    features = [0]
    features.extend(range(2, 31))
    X = all_data[:, features].copy()
    return data_details_return({'X': X, 'y': y}, data_set)
def validate(value):
    """Check whether the given value is a valid country code.

    @param string value
    @return bool
    """
    if not helpers.has_len(value):
        return False
    # Fix: dict.has_key() was removed in Python 3; the `in` operator is
    # equivalent and works on both Python 2 and 3.
    return str(value).lower() in COUNTRIES
def save_to(self, image):
    """Save this image to another DockerImage.

    :param image: DockerImage
    :return:
    """
    # Guard: the target must be the same image type as this instance.
    if not isinstance(image, type(self)):
        raise ConuException("Invalid target image type", type(image))
    self.copy(image.name, image.tag,
              target_transport=image.transport,
              target_path=image.path,
              logs=False)
def get(self, sid):
    """Constructs a TaskQueueContext.

    :param sid: The sid
    :returns: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueContext
    :rtype: twilio.rest.taskrouter.v1.workspace.task_queue.TaskQueueContext
    """
    workspace_sid = self._solution['workspace_sid']
    return TaskQueueContext(self._version, workspace_sid=workspace_sid, sid=sid)
def calculate(self, msg: array.array) -> array.array:
    """Get the checksum for a WSP message.  There are three hashes possible:

    1) 4 Bit Checksum - For Switch Telegram (RORG = 5 or 6 and
       STATUS = 0x20 or 0x30)
    2) 8 Bit Checksum: STATUS bit 2^7 = 0
    3) 8 Bit CRC: STATUS bit 2^7 = 1

    :param msg: the message without Preamble/SOF and EOF.  Message starts
        with RORG and ends with CRC.
    :return: the computed hash bits, or None when the message is too
        short to contain the fields inspected here.
    """
    try:
        if self.mode == self.ChecksumMode.auto:
            # Switch telegram: RORG nibble is 5 or 6 -> 4-bit checksum.
            if msg[0:4] == util.hex2bit("5") or msg[0:4] == util.hex2bit("6"):
                return self.checksum4(msg)
            # STATUS is the byte preceding the trailing 8-bit hash.
            status = msg[-16:-8]
            if status[0]:
                # STATUS bit 2^7 set -> 8-bit CRC (ignore trailing hash).
                return self.crc8(msg[:-8])
            else:
                # STATUS bit 2^7 clear -> 8-bit checksum (ignore trailing hash).
                return self.checksum8(msg[:-8])
        elif self.mode == self.ChecksumMode.checksum4:
            return self.checksum4(msg)
        elif self.mode == self.ChecksumMode.checksum8:
            return self.checksum8(msg[:-8])
        elif self.mode == self.ChecksumMode.crc8:
            return self.crc8(msg[:-8])
    except IndexError:
        # Message shorter than the slices above require.
        return None
def number(self):
    """Return a multidigit int or float number token.

    Consumes digits from the stream; a '.' switches to the float form and
    consumes the fractional digits as well.
    """
    digits = []
    while self.char is not None and self.char.isdigit():
        digits.append(self.char)
        self.advance()
    if self.char != '.':
        return Token(Nature.INT_NUMBER, ''.join(digits))
    # Decimal point: keep consuming the fractional part.
    digits.append(self.char)
    self.advance()
    while self.char is not None and self.char.isdigit():
        digits.append(self.char)
        self.advance()
    return Token(Nature.FLOAT_NUMBER, ''.join(digits))
def override_operation(self, operation, reason):
    """Re-classify this entry pair with a new operation.

    The new operation must differ from the current one and be one of the
    known PAIR_OPERATIONS; the reason is recorded for diagnostics.
    """
    previous_classification = (self.local_classification, self.remote_classification)
    previous_operation = self.operation
    assert operation != previous_operation
    assert operation in PAIR_OPERATIONS
    if self.any_entry.target.synchronizer.verbose > 3:
        write("override_operation({}, {}) -> {} ({})".format(
            previous_classification, previous_operation, operation, reason), debug=True)
    self.operation = operation
    self.re_class_reason = reason
def get_child_type_choices(self, request, action):
    """Override choice labels with ``verbose_name`` from plugins and sort.

    Falls back to the superclass choices unchanged when no plugins are
    registered.
    """
    # Get choices from the super class to check permissions.
    choices = super(ChildModelPluginPolymorphicParentModelAdmin, self).get_child_type_choices(request, action)
    plugins = self.child_model_plugin_class.get_plugins()
    if not plugins:
        return choices
    # Update label with verbose name from plugins, keyed by content type pk.
    labels = {}
    sort_priorities = {}
    for plugin in plugins:
        pk = plugin.content_type.pk
        labels[pk] = capfirst(plugin.verbose_name)
        sort_priorities[pk] = getattr(plugin, 'sort_priority', labels[pk])
    relabeled = [(ctype, labels[ctype]) for ctype, _ in choices]
    # Fix: sorted(cmp=...) and the cmp() builtin are Python-2-only; a
    # key function sorts identically and works on both Python 2 and 3.
    return sorted(relabeled, key=lambda choice: sort_priorities[choice[0]])
def asluav_status_encode(self, LED_status, SATCOM_status, Servo_status, Motor_rpm):
    '''Extended state information for ASLUAVs.

    LED_status      : Status of the position-indicator LEDs (uint8_t)
    SATCOM_status   : Status of the IRIDIUM satellite communication system (uint8_t)
    Servo_status    : Status vector for up to 8 servos (uint8_t)
    Motor_rpm       : Motor RPM (float)
    '''
    message = MAVLink_asluav_status_message(LED_status, SATCOM_status, Servo_status, Motor_rpm)
    return message
def write_byte(self, cmd, value):
    """Writes an 8-bit byte to the specified command register."""
    register, payload = cmd, value
    self.bus.write_byte_data(self.address, register, payload)
    message = "write_byte: Wrote 0x%02X to command register 0x%02X" % (payload, register)
    self.log.debug(message)
def click(self, target=None, modifiers=""):
    """Moves the cursor to the target location and clicks the default
    mouse button.

    ``target`` may be a Pattern, string, Match, Region, or Location; when
    None, the last match (or this region itself) is used.  ``modifiers``
    is a modifier-key string held down around the click.
    """
    if target is None:
        target = self._lastMatch or self  # Whichever one is not None
    # Resolve the concrete click point from whatever type `target` is.
    if isinstance(target, Pattern):
        target_location = self.find(target).getTarget()
    elif isinstance(target, basestring):
        target_location = self.find(target).getTarget()
    elif isinstance(target, Match):
        target_location = target.getTarget()
    elif isinstance(target, Region):
        target_location = target.getCenter()
    elif isinstance(target, Location):
        target_location = target
    else:
        raise TypeError("click expected Pattern, String, Match, Region, or Location object")
    if modifiers != "":
        keyboard.keyDown(modifiers)
    Mouse.moveSpeed(target_location, Settings.MoveMouseDelay)
    time.sleep(0.1)  # For responsiveness
    if Settings.ClickDelay > 0:
        time.sleep(min(1.0, Settings.ClickDelay))
        # ClickDelay is a one-shot setting; reset after use.
        Settings.ClickDelay = 0.0
    Mouse.click()
    time.sleep(0.1)
    # Fix: the original tested `modifiers != 0`, which is always true for
    # the default "" and would release modifiers that were never pressed;
    # mirror the keyDown guard above.
    if modifiers != "":
        keyboard.keyUp(modifiers)
    Debug.history("Clicked at {}".format(target_location))
def add_edge(self, u, v, weight=None):
    """Add an edge between u and v.

    The nodes u and v will be automatically added if they are not already
    in the graph.

    Parameters
    ----------
    u, v : nodes
        Nodes can be any hashable Python object.
    weight : int, float (default=None)
        The weight of the edge.

    Examples
    --------
    >>> from pgmpy.base import UndirectedGraph
    >>> G = UndirectedGraph()
    >>> G.add_nodes_from(nodes=['Alice', 'Bob', 'Charles'])
    >>> G.add_edge(u='Alice', v='Bob')
    >>> G.nodes()
    ['Alice', 'Bob', 'Charles']
    >>> G.edges()
    [('Alice', 'Bob')]

    When the node is not already present in the graph:

    >>> G.add_edge(u='Alice', v='Ankur')
    >>> G.nodes()
    ['Alice', 'Ankur', 'Bob', 'Charles']
    >>> G.edges()
    [('Alice', 'Bob'), ('Alice', 'Ankur')]

    Adding edges with weight:

    >>> G.add_edge('Ankur', 'Maria', weight=0.1)
    >>> G.edge['Ankur']['Maria']
    {'weight': 0.1}
    """
    # Delegate to the underlying graph implementation.
    super(UndirectedGraph, self).add_edge(u, v, weight=weight)
def iswhat(o):
    """Return a dictionary of all possible identity checks available to
    :mod:`inspect` applied to `o`.

    Returns:
        dict: keys are `inspect.is*` function names; values are `bool`
        results returned by each of the methods.
    """
    import inspect
    # Collect every inspect member whose name begins with "is".
    predicates = {
        name: func
        for name, func in inspect.getmembers(inspect)
        if name.startswith("is")
    }
    return {name: func(o) for name, func in predicates.items()}
def from_string(cls, model_id, default_project=None):
    """Construct a model reference from model ID string.

    Args:
        model_id (str):
            A model ID in standard SQL format.  If ``default_project`` is
            not specified, this must include a project ID, dataset ID, and
            model ID, each separated by ``.``.
        default_project (str):
            Optional.  The project ID to use when ``model_id`` does not
            include a project ID.

    Returns:
        google.cloud.bigquery.model.ModelReference:
            Model reference parsed from ``model_id``.

    Raises:
        ValueError:
            If ``model_id`` is not a fully-qualified table ID in standard
            SQL format.
    """
    project, dataset, model = _helpers._parse_3_part_id(
        model_id, default_project=default_project, property_name="model_id")
    resource = {"projectId": project, "datasetId": dataset, "modelId": model}
    return cls.from_api_repr(resource)
def _operator_symbol_handler(c, ctx):
    """Handles operator symbol values within s-expressions.

    Coroutine protocol: the caller sends (character, handler) pairs;
    operator characters are accumulated into the context value until a
    non-operator arrives, at which point a SYMBOL scalar event is emitted.
    """
    assert c in _OPERATORS
    ctx.set_unicode()
    val = ctx.value
    val.append(c)
    # Receive the next character (and this handler) from the driver.
    c, self = yield
    trans = ctx.immediate_transition(self)
    while c in _OPERATORS:
        val.append(c)
        c, _ = yield trans
    # First non-operator character ends the symbol.
    yield ctx.event_transition(IonEvent, IonEventType.SCALAR, IonType.SYMBOL, val.as_symbol())
def create_free_shipping_coupon(cls, free_shipping_coupon, **kwargs):
    """Create FreeShippingCoupon

    Create a new FreeShippingCoupon.
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async=True

    >>> thread = api.create_free_shipping_coupon(free_shipping_coupon, async=True)
    >>> result = thread.get()

    :param async bool
    :param FreeShippingCoupon free_shipping_coupon: Attributes of freeShippingCoupon to create (required)
    :return: FreeShippingCoupon
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('async'):
        # Asynchronous: the helper returns the request thread directly.
        return cls._create_free_shipping_coupon_with_http_info(free_shipping_coupon, **kwargs)
    # Synchronous: the helper returns the response data.
    response = cls._create_free_shipping_coupon_with_http_info(free_shipping_coupon, **kwargs)
    return response
def repositories(self):
    """Get dependencies by repositories.

    Resolves ``self.name`` against the active repository (``self.repo``):
    sets ``self.find_pkg`` to the search result and, when found,
    ``self.dependencies_list`` to the resolved dependency chain.
    """
    if self.repo == "sbo":
        # SlackBuilds.org repository: normalize the package name
        # case-insensitively, then resolve its dependency chain.
        self.sbo_case_insensitive()
        self.find_pkg = sbo_search_pkg(self.name)
        if self.find_pkg:
            self.dependencies_list = Requires(self.flag).sbo(self.name)
    else:
        # Binary repository: read the repo's PACKAGES.TXT index to learn
        # the known package names, then resolve name and dependencies.
        PACKAGES_TXT = Utils().read_file(self.meta.lib_path + "{0}_repo/PACKAGES.TXT".format(self.repo))
        self.names = Utils().package_name(PACKAGES_TXT)
        self.bin_case_insensitive()
        self.find_pkg = search_pkg(self.name, self.repo)
        if self.find_pkg:
            # Blacklisted packages are excluded from dependency resolution.
            self.black = BlackList().packages(self.names, self.repo)
            self.dependencies_list = Dependencies(self.repo, self.black).binary(self.name, self.flag)
def on_message(self, message_id_service, contact_id_service, content):
    """To use as callback in message service backend.

    Routes an incoming message either into an existing live chat the
    contact participates in, or into the new-chat workflow.
    """
    try:
        # Look for a live chat where the contact is either the agent or
        # the asker.
        live_chat = Chat.live.get(Q(agent__id_service=contact_id_service) | Q(asker__id_service=contact_id_service))
    except ObjectDoesNotExist:
        # No ongoing chat for this contact: start processing a new chat.
        self._new_chat_processing(message_id_service, contact_id_service, content)
    else:
        # Deliver the message into the existing chat.
        live_chat.handle_message(message_id_service, contact_id_service, content, self)
def symmetric_rescaling(A, copy=True):
    """Scale the matrix symmetrically.

    A = D^{-1/2} A D^{-1/2}

    where D = diag(A).

    The left multiplication is accomplished through scale_rows and the right
    multiplication is done through scale_columns.

    Parameters
    ----------
    A : sparse matrix
        Sparse matrix with N rows
    copy : {True, False}
        - If copy=True, then the matrix is copied to a new and different return
          matrix (e.g. B = symmetric_rescaling(A))
        - If copy=False, then the matrix is overwritten deeply (e.g.
          symmetric_rescaling(A, copy=False) overwrites A)

    Returns
    -------
    D_sqrt : array
        Array of sqrt(diag(A))
    D_sqrt_inv : array
        Array of 1/sqrt(diag(A))
    DAD : csr_matrix
        Symmetrically scaled A

    Notes
    -----
    - if A is not csr, it is converted to csr and sent to scale_rows

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse import spdiags
    >>> from pyamg.util.utils import symmetric_rescaling
    >>> n = 5
    >>> e = np.ones((n, 1)).ravel()
    >>> data = [-1*e, 2*e, -1*e]
    >>> A = spdiags(data, [-1, 0, 1], n, n).tocsr()
    >>> Ds, Dsi, DAD = symmetric_rescaling(A)
    >>> print(DAD.todense())
    [[ 1.  -0.5  0.   0.   0. ]
     [-0.5  1.  -0.5  0.   0. ]
     [ 0.  -0.5  1.  -0.5  0. ]
     [ 0.   0.  -0.5  1.  -0.5]
     [ 0.   0.   0.  -0.5  1. ]]
    """
    if isspmatrix_csr(A) or isspmatrix_csc(A) or isspmatrix_bsr(A):
        if A.shape[0] != A.shape[1]:
            raise ValueError('expected square matrix')
        D = diag_sparse(A)
        # Zero diagonal entries get a zero scaling factor rather than a
        # division by zero.
        mask = (D != 0)
        if A.dtype != complex:
            D_sqrt = np.sqrt(abs(D))
        else:
            # We can take square roots of negative numbers
            D_sqrt = np.sqrt(D)
        D_sqrt_inv = np.zeros_like(D_sqrt)
        D_sqrt_inv[mask] = 1.0 / D_sqrt[mask]
        DAD = scale_rows(A, D_sqrt_inv, copy=copy)
        # The row-scaled result is already a (possibly new) matrix, so the
        # column scaling can always be done in place.
        DAD = scale_columns(DAD, D_sqrt_inv, copy=False)
        return D_sqrt, D_sqrt_inv, DAD
    else:
        # csr_matrix(A) already allocates a fresh matrix, so there is no
        # need for the recursive call to copy it a second time (the
        # original recursed with the default copy=True, duplicating work).
        return symmetric_rescaling(csr_matrix(A), copy=False)
def check_installation(cur_file):
    """Warn user if running cleverhans from a different directory than tutorial."""
    script_root = os.path.split(os.path.dirname(os.path.abspath(cur_file)))[0]
    library_root = os.path.split(cleverhans.__path__[0])[0]
    if script_root == library_root:
        return
    # Two distinct checkouts/installs detected: tell the user which one the
    # script came from and which one Python actually imported.
    warnings.warn("It appears that you have at least two versions of "
                  "cleverhans installed, one at %s and one at"
                  " %s. You are running the tutorial script from the "
                  "former but python imported the library module from the "
                  "latter. This may cause errors, for example if the tutorial"
                  " version is newer than the library version and attempts to"
                  " call new features." % (script_root, library_root))
def run_simulations(self, parameter_list, data_folder):
    """Run multiple simulations in parallel on a DRMAA grid.

    One job is submitted per parameter combination in ``parameter_list``;
    each job runs in its own temporary folder under ``data_folder``.
    Yields a result dictionary for each job as it completes.
    """
    # Open up a session
    s = drmaa.Session()
    s.initialize()
    # Create a job template for each parameter combination
    jobs = {}
    for parameter in parameter_list:
        # Initialize result
        current_result = {'params': {}, 'meta': {}}
        current_result['params'].update(parameter)
        command = " ".join([self.script_executable] + ['--%s=%s' % (param, value) for param, value in parameter.items()])
        # Run from dedicated temporary folder
        current_result['meta']['id'] = str(uuid.uuid4())
        temp_dir = os.path.join(data_folder, current_result['meta']['id'])
        if not os.path.exists(temp_dir):
            os.makedirs(temp_dir)
        jt = s.createJobTemplate()
        jt.remoteCommand = os.path.dirname(os.path.abspath(__file__)) + '/run_program.sh'
        jt.args = [command]
        jt.jobEnvironment = self.environment
        jt.workingDirectory = temp_dir
        jt.nativeSpecification = SIMULATION_GRID_PARAMS
        output_filename = os.path.join(temp_dir, 'stdout')
        error_filename = os.path.join(temp_dir, 'stderr')
        jt.outputPath = ':' + output_filename
        jt.errorPath = ':' + error_filename
        jobid = s.runJob(jt)
        # Save the template in our dictionary
        jobs[jobid] = {'template': jt, 'result': current_result, 'output': output_filename, 'error': error_filename, }
    # Check for job completion, yield results when they are ready
    try:
        while len(jobs):
            found_done = False
            # Iterate over a snapshot so that deleting a finished job's
            # entry is safe on Python 3.
            for curjob in list(jobs.keys()):
                try:
                    status = s.jobStatus(curjob)
                except drmaa.errors.DrmCommunicationException:
                    # BUGFIX: previously a failed status query fell through
                    # and tested a stale (or, on the very first iteration,
                    # unbound) `status`. Skip this job until the next
                    # polling round instead.
                    continue
                if status is drmaa.JobState.DONE:
                    current_result = jobs[curjob]['result']
                    # TODO Actually compute time elapsed in the running
                    # state
                    current_result['meta']['elapsed_time'] = 0
                    try:
                        s.deleteJobTemplate(jobs[curjob]['template'])
                    except drmaa.errors.DrmCommunicationException:
                        pass
                    del jobs[curjob]
                    found_done = True
                    yield current_result
                    break
            if not found_done:
                # Sleep if we can't find a completed task
                time.sleep(6)
    finally:
        # Always clean up templates and terminate any still-running jobs,
        # even if the consumer abandons the generator early.
        try:
            for v in jobs.values():
                s.deleteJobTemplate(v['template'])
            s.control(drmaa.JOB_IDS_SESSION_ALL, drmaa.JobControlAction.TERMINATE)
            s.synchronize([drmaa.JOB_IDS_SESSION_ALL], dispose=True)
            s.exit()
        except (drmaa.errors.NoActiveSessionException):
            pass
def astype(self, type_name):
    """Convert AstroImage object to some other kind of object.

    Supported targets are 'nddata' and 'hdu'; anything else raises
    ValueError.
    """
    method_name = {'nddata': 'as_nddata', 'hdu': 'as_hdu'}.get(type_name)
    if method_name is None:
        raise ValueError("Unrecognized conversion type '%s'" % (type_name))
    # Look the converter up lazily so only the requested one is touched.
    return getattr(self, method_name)()
def fetch(**kwargs):
    '''
    .. versionadded:: 2016.3.4

    freebsd-update fetch wrapper. Based on the currently installed world and the
    configuration options set, fetch all available binary updates.

    kwargs:
        Parameters of freebsd-update command.
    '''
    # fetch continues when no controlling terminal is present
    pre = ''
    post = ''
    run_args = {}
    if float(__grains__['osrelease']) >= 10.2:
        # FreeBSD >= 10.2: freebsd-update accepts this flag directly.
        post += '--not-running-from-cron'
    else:
        # Older releases: force a non-interactive pager so fetch does not
        # block waiting for a terminal; needs a shell to expand `env`.
        pre += ' env PAGER=cat'
        run_args['python_shell'] = True
    return _wrapper('fetch', pre=pre, post=post, run_args=run_args, **kwargs)
def device_measurement(device, ts=None, part=None, result=None, code=None, **kwargs):
    """Returns a JSON MeasurementPayload ready to be send through a
    transport.

    If `ts` is not given, the current time is used. `part` is an
    optional `Part` object, and `result` and `code` are the respective
    fields of the `Measurement` object. All other arguments are
    interpreted as dimensions.

    Minimal example, using a `Device` object to send two
    measurements:

    >>> d = Device("12345")
    >>> def publish(msg):
    ...     pass
    >>> publish(d.measurement(temperature=22.8))
    >>> publish(d.measurement(pressure=4.1))
    """
    if ts is None:
        ts = local_now()
    payload = MeasurementPayload(device=device, part=part)
    # The extra keyword names become the measurement's dimension names.
    m = Measurement(ts, result, code, list(kwargs))
    payload.measurements.append(m)
    # Record the dimension values as a single sample at `ts`.
    m.add_sample(ts, **kwargs)
    return dumps(payload)
def get_item(self, identifier, item_metadata=None, request_kwargs=None):
    """A method for creating :class:`internetarchive.Item <Item>` and
    :class:`internetarchive.Collection <Collection>` objects.

    :type identifier: str
    :param identifier: A globally unique Archive.org identifier.

    :type item_metadata: dict
    :param item_metadata: (optional) A metadata dict used to initialize the Item or
                          Collection object. Metadata will automatically be retrieved
                          from Archive.org if nothing is provided.

    :type request_kwargs: dict
    :param request_kwargs: (optional) Keyword arguments to be used in
                           :meth:`requests.sessions.Session.get` request.
    """
    request_kwargs = request_kwargs or {}
    if not item_metadata:
        # No metadata given: fetch it from Archive.org now.
        logger.debug('no metadata provided for "{0}", retrieving now.'.format(identifier))
        item_metadata = self.get_metadata(identifier, request_kwargs)
    mediatype = item_metadata.get('metadata', {}).get('mediatype')
    try:
        item_class = self.ITEM_MEDIATYPE_TABLE.get(mediatype, Item)
    except TypeError:
        # An unhashable mediatype cannot be looked up; fall back to Item.
        item_class = Item
    return item_class(self, identifier, item_metadata)
def get_reqv(self):
    """:returns: an instance of class:`RjbEquivalent` if reqv_hdf5 is set"""
    if 'reqv' not in self.inputs:
        return None
    reqv_files = self.inputs['reqv']
    # One RjbEquivalent per configured input entry.
    return {key: valid.RjbEquivalent(value) for key, value in reqv_files.items()}
def find_time_base(self, gps, first_ms_stamp):
    '''work out time basis for the log - new style'''
    # Absolute time of the GPS fix, from GPS week number + ms-of-week.
    t = self._gpsTimeToTime(gps.Week, gps.TimeMS)
    # gps.T appears to be the log's millisecond timestamp at the fix;
    # subtracting it anchors the timebase at the log's t=0 -- TODO confirm.
    self.set_timebase(t - gps.T * 0.001)
    # Absolute timestamp of the first log record.
    self.timestamp = self.timebase + first_ms_stamp * 0.001
def _update_control_section(self):
    """private method to synchronize the control section counters with the
    various parts of the control file. This is usually called during the
    Pst.write() method."""
    self.control_data.npar = self.npar
    self.control_data.nobs = self.nobs
    self.control_data.npargp = self.parameter_groups.shape[0]
    # Observation groups are counted across both the observation data and
    # the prior information sections.
    self.control_data.nobsgp = self.observation_data.obgnme.value_counts().shape[0] + self.prior_information.obgnme.value_counts().shape[0]
    self.control_data.nprior = self.prior_information.shape[0]
    self.control_data.ntplfle = len(self.template_files)
    self.control_data.ninsfle = len(self.instruction_files)
    self.control_data.numcom = len(self.model_command)
def answer_callback_query(self, callback_query_id, text=None, show_alert=None, url=None, cache_time=None):
    """Use this method to send answers to callback queries sent from inline keyboards. The answer will be displayed to
    the user as a notification at the top of the chat screen or as an alert.

    :param callback_query_id: Unique identifier for the query to be answered.
    :param text: Optional text of the notification shown to the user.
    :param show_alert: Optional; if true, show an alert instead of a notification.
    :param url: Optional URL to be opened by the user's client.
    :param cache_time: Optional maximum time in seconds the result may be cached client-side.
    :return: Result of the underlying API call.
    """
    return apihelper.answer_callback_query(self.token, callback_query_id, text, show_alert, url, cache_time)
def broadcast(cls, message):
    """Broadcast message to all connected clients.

    :param message: payload forwarded verbatim to each client's
        ``send_message()``.
    """
    clients = cls.get_clients()
    # BUGFIX: the original used dict.iteritems(), which does not exist on
    # Python 3 (AttributeError). The client id key was unused anyway, so
    # iterate the values directly -- works on both Python 2 and 3.
    for client in clients.values():
        client.send_message(message)
def add_drop_down(self, col_number, col_label):
    """Add a correctly formatted drop-down-menu for given col_label,
    if required or suggested.
    Otherwise do nothing.

    Parameters
    ----------
    col_number : int
        grid position at which to add a drop down menu
    col_label : str
        column name
    """
    # Strip any marker previously appended to the label ('**' marks a
    # controlled vocabulary, '^^' a suggested one) before re-deciding.
    if col_label.endswith('**') or col_label.endswith('^^'):
        col_label = col_label[:-2]
    # add drop-down for experiments
    if col_label == "experiments":
        if 'measurements' in self.contribution.tables:
            meas_table = self.contribution.tables['measurements'].df
            if 'experiment' in meas_table.columns:
                exps = meas_table['experiment'].unique()
                # choices entry is (options, two_tiered) -- flat list here.
                self.choices[col_number] = (sorted(exps), False)
                self.grid.SetColLabelValue(col_number, col_label + "**")
        return
    if col_label == 'method_codes':
        self.add_method_drop_down(col_number, col_label)
    elif col_label == 'magic_method_codes':
        # Legacy (MagIC 2.x) column name maps onto the 3.x method_codes menu.
        self.add_method_drop_down(col_number, 'method_codes')
    elif col_label in ['specimens', 'samples', 'sites', 'locations']:
        if col_label in self.contribution.tables:
            item_df = self.contribution.tables[col_label].df
            item_names = item_df.index.unique()
            # [col_label[:-1]].unique()
            self.choices[col_number] = (sorted(item_names), False)
    elif col_label in ['specimen', 'sample', 'site', 'location']:
        # Singular column names pull their options from the plural table.
        if col_label + "s" in self.contribution.tables:
            item_df = self.contribution.tables[col_label + "s"].df
            item_names = item_df.index.unique()
            # [col_label[:-1]].unique()
            self.choices[col_number] = (sorted(item_names), False)
    # add vocabularies
    if col_label in self.contribution.vocab.suggested:
        typ = 'suggested'
    elif col_label in self.contribution.vocab.vocabularies:
        typ = 'controlled'
    else:
        return
    # add menu, if not already set
    if col_number not in list(self.choices.keys()):
        if typ == 'suggested':
            self.grid.SetColLabelValue(col_number, col_label + "^^")
            controlled_vocabulary = self.contribution.vocab.suggested[col_label]
        else:
            self.grid.SetColLabelValue(col_number, col_label + "**")
            controlled_vocabulary = self.contribution.vocab.vocabularies[col_label]
        stripped_list = []
        for item in controlled_vocabulary:
            try:
                stripped_list.append(str(item))
            except UnicodeEncodeError:
                # skips items with non ASCII characters
                pass
        if len(stripped_list) > 100:
            # split out the list alphabetically, into a dict of lists
            # {'A': ['alpha', 'artist'], 'B': ['beta', 'beggar'] ...}
            dictionary = {}
            for item in stripped_list:
                letter = item[0].upper()
                if letter not in list(dictionary.keys()):
                    dictionary[letter] = []
                dictionary[letter].append(item)
            stripped_list = dictionary
        two_tiered = True if isinstance(stripped_list, dict) else False
        self.choices[col_number] = (stripped_list, two_tiered)
    return
def kill_process(process):
    """Kill the process group associated with the given process. (posix)

    Sends SIGINT to the process and blocks until it has exited.
    """
    logging.getLogger('xenon').info('Terminating Xenon-GRPC server.')
    os.kill(process.pid, signal.SIGINT)
    process.wait()
def send_lockedtransfer(transfer_description: TransferDescriptionWithSecretState, channel_state: NettingChannelState, message_identifier: MessageID, block_number: BlockNumber, ) -> SendLockedTransfer:
    """Create a mediated transfer using channel.

    Builds the SendLockedTransfer event for the given channel, locking
    the payment amount plus the allocated mediation fee until
    ``lock_expiration``.
    """
    # The channel must belong to the same token network as the transfer.
    assert channel_state.token_network_identifier == transfer_description.token_network_identifier
    lock_expiration = get_initial_lock_expiration(block_number, channel_state.reveal_timeout, )
    # The payment amount and the fee amount must be included in the locked
    # amount, as a guarantee to the mediator that the fee will be claimable
    # on-chain.
    total_amount = PaymentWithFeeAmount(transfer_description.amount + transfer_description.allocated_fee, )
    lockedtransfer_event = channel.send_lockedtransfer(channel_state=channel_state, initiator=transfer_description.initiator, target=transfer_description.target, amount=total_amount, message_identifier=message_identifier, payment_identifier=transfer_description.payment_identifier, expiration=lock_expiration, secrethash=transfer_description.secrethash, )
    return lockedtransfer_event
def add_link(self, link_id=None, dump=True):
    """Create a link. By default the link is empty.

    Returns the existing link when ``link_id`` is already known.

    :param dump: Dump topology to disk
    """
    if link_id and link_id in self._links:
        # Already registered: hand back the existing link unchanged.
        return self._links[link_id]
    new_link = UDPLink(self, link_id=link_id)
    self._links[new_link.id] = new_link
    if dump:
        self.dump()
    return new_link
def update_filter(self, *args, **kwargs):
    """Update the filter

    Rebuilds the proxy model's forbidden status/type/up-to-date/alien
    sets from the current checkbox states and applies the search field
    text as a wildcard filter.

    :returns: None
    :rtype: None
    :raises: NotImplementedError
    """
    # An unchecked checkbox means its corresponding status is hidden.
    forbidden_statuses = []
    if not self.loaded_checkb.isChecked():
        forbidden_statuses.append(reftrack.Reftrack.LOADED)
    if not self.unloaded_checkb.isChecked():
        forbidden_statuses.append(reftrack.Reftrack.UNLOADED)
    if not self.imported_checkb.isChecked():
        forbidden_statuses.append(reftrack.Reftrack.IMPORTED)
    if not self.empty_checkb.isChecked():
        # None represents the "empty" status in the proxy model.
        forbidden_statuses.append(None)
    self.proxy.set_forbidden_statuses(forbidden_statuses)
    # Same scheme for the per-type checkboxes.
    forbidden_types = []
    for typ, cb in self.typecbmap.items():
        if not cb.isChecked():
            forbidden_types.append(typ)
    self.proxy.set_forbidden_types(forbidden_types)
    # Up-to-date filter: False = old versions, True = newest versions.
    forbidden_uptodate = []
    if not self.old_checkb.isChecked():
        forbidden_uptodate.append(False)
    if not self.newest_checkb.isChecked():
        forbidden_uptodate.append(True)
    self.proxy.set_forbidden_uptodate(forbidden_uptodate)
    forbidden_alien = [] if self.alien_checkb.isChecked() else [True]
    self.proxy.set_forbidden_alien(forbidden_alien)
    # Finally, apply the free-text search as a wildcard pattern.
    self.proxy.setFilterWildcard(self.search_le.text())
def update_node(availability=str, node_name=str, role=str, node_id=str, version=int):
    '''
    Updates docker swarm nodes/needs to target a manager node/minion

    availability
        Drain or Active

    node_name
        minion/node

    role
        role of manager or worker

    node_id
        The Id and that can be obtained via swarm.node_ls

    version
        Is obtained by swarm.node_ls

    CLI Example:

    .. code-block:: bash

        salt '*' swarm.update_node availability=drain node_name=minion2 role=worker node_id=3k9x7t8m4pel9c0nqr3iajnzp version=19
    '''
    # NOTE(review): the `=str` / `=int` defaults look like type placeholders,
    # not real default values -- presumably Salt always passes these
    # explicitly from the CLI; confirm before relying on defaults.
    client = docker.APIClient(base_url='unix://var/run/docker.sock')
    try:
        salt_return = {}
        node_spec = {'Availability': availability, 'Name': node_name, 'Role': role}
        client.update_node(node_id=node_id, version=version, node_spec=node_spec)
        salt_return.update({'Node Information': node_spec})
    except TypeError:
        # Missing or invalid arguments surface as TypeError from docker-py.
        salt_return = {}
        salt_return.update({'Error': 'Make sure all args are passed [availability, node_name, role, node_id, version]'})
    return salt_return
def parse(self):
    """Parse command line arguments and options.

    Returns:
        Dictionary containing all given command line arguments and options.
    """
    options, arguments = self.parser.parse_args()
    # Store the parsed values on the instance, then serialize them.
    self._set_attributes(arguments, options)
    return self._create_dictionary()
def ParseFileObject(self, parser_mediator, file_object):
    """Parses an ASL file-like object.

    Args:
        parser_mediator (ParserMediator): mediates interactions between parsers
            and other components, such as storage and dfvfs.
        file_object (dfvfs.FileIO): file-like object.

    Raises:
        UnableToParseFile: when the file cannot be parsed.
    """
    file_header_map = self._GetDataTypeMap('asl_file_header')
    try:
        file_header, _ = self._ReadStructureFromFileObject(file_object, 0, file_header_map)
    except (ValueError, errors.ParseError) as exception:
        raise errors.UnableToParseFile('Unable to parse file header with error: {0!s}'.format(exception))
    if file_header.signature != self._FILE_SIGNATURE:
        raise errors.UnableToParseFile('Invalid file signature.')
    # TODO: generate event for creation time.
    file_size = file_object.get_size()
    if file_header.first_log_entry_offset > 0:
        last_log_entry_offset = 0
        file_offset = file_header.first_log_entry_offset
        # Walk the chain of log records; each record yields the offset of
        # the next one.
        while file_offset < file_size:
            last_log_entry_offset = file_offset
            try:
                file_offset = self._ParseRecord(parser_mediator, file_object, file_offset)
            except errors.ParseError as exception:
                # A malformed record stops parsing entirely, but only as a
                # warning -- earlier records have already been produced.
                parser_mediator.ProduceExtractionWarning('unable to parse record with error: {0!s}'.format(exception))
                return
            if file_offset == 0:
                # A zero next-offset marks the final record in the chain.
                break
        # Sanity check: the last record seen should match the header's claim.
        if last_log_entry_offset != file_header.last_log_entry_offset:
            parser_mediator.ProduceExtractionWarning('last log entry offset does not match value in file header.')
def _fetch_rrd_meta ( self , connection , rrd_path_root , whitelist , field_names , tags ) :
'''Fetch metadata about each RRD in this Cacti DB , returning a list of
tuples of ( hostname , device _ name , rrd _ path )''' | def _in_whitelist ( rrd ) :
path = rrd . replace ( '<path_rra>/' , '' )
for p in whitelist :
if fnmatch ( path , p ) :
return True
return False
c = connection . cursor ( )
and_parameters = " OR " . join ( [ "hsc.field_name = '%s'" % field_name for field_name in field_names ] )
# Check for the existence of the ` host _ snmp _ cache ` table
rrd_query = """
SELECT
h.hostname as hostname,
hsc.field_value as device_name,
dt.data_source_path as rrd_path
FROM data_local dl
JOIN host h on dl.host_id = h.id
JOIN data_template_data dt on dt.local_data_id = dl.id
LEFT JOIN host_snmp_cache hsc on h.id = hsc.host_id
AND dl.snmp_index = hsc.snmp_index
WHERE dt.data_source_path IS NOT NULL
AND dt.data_source_path != ''
AND ({} OR hsc.field_name is NULL) """ . format ( and_parameters )
c . execute ( rrd_query )
res = [ ]
for hostname , device_name , rrd_path in c . fetchall ( ) :
if not whitelist or _in_whitelist ( rrd_path ) :
if hostname in ( 'localhost' , '127.0.0.1' ) :
hostname = self . hostname
rrd_path = rrd_path . replace ( '<path_rra>' , rrd_path_root )
device_name = device_name or None
res . append ( ( hostname , device_name , rrd_path ) )
# Collect stats
num_hosts = len ( set ( [ r [ 0 ] for r in res ] ) )
self . gauge ( 'cacti.rrd.count' , len ( res ) , tags = tags )
self . gauge ( 'cacti.hosts.count' , num_hosts , tags = tags )
return res |
def set_rtscts(self, enable):
    '''enable/disable RTS/CTS if applicable'''
    try:
        # Try the setter-method API first; presumably the older pyserial
        # interface -- TODO confirm.
        self.port.setRtsCts(enable)
    except Exception:
        # Fall back to the plain attribute used by newer pyserial.
        self.port.rtscts = enable
    # Remember the requested state on this object as well.
    self.rtscts = enable
def output(f):
    """This decorator allows to choose to return an output as text or to save
    it to a file.

    The wrapped method's first positional argument (or the 'text' keyword)
    acts as the flag: truthy means return the result, falsy means write it
    to a numbered file named '<self.filename>.<funcname>'.
    """
    def wrapper(self, *args, **kwargs):
        try:
            # NOTE(review): a falsy 'text' kwarg falls through to args[0]
            # because of the `or` -- confirm this is intended.
            text = kwargs.get('text') or args[0]
        except IndexError:
            # No flag supplied at all: default to returning text.
            text = True
        _ = f(self, *args, **kwargs)
        if text:
            return _
        elif _ is not None and isinstance(_, string_types):
            # Save to '<filename>.<funcname>', appending '-2', '-3', ...
            # while a file with that name already exists.
            filename = "{}.{}".format(self.filename, f.__name__)
            while exists(filename):
                name, ext = splitext(filename)
                try:
                    name, i = name.split('-')
                    i = int(i) + 1
                except ValueError:
                    i = 2
                filename = "{}-{}".format(name, i) + ext
            with open(filename, 'w') as out:
                out.write(_)
    return wrapper
def _to_user_defined(pif_obj):
    """Read the systems in the PIF to populate the user-defined portion.

    Flattens the PIF hierarchy into key/value pairs, then adds elemental
    composition either from explicit composition entries
    ("elemental_percent") or, failing that, parsed from the chemical
    formula ("elemental_proportion").
    """
    res = {}
    # make a read view to flatten the hierarchy
    rv = ReadView(pif_obj)
    # Iterate over the keys in the read view
    for k in rv.keys():
        name, value = _extract_key_value(rv[k].raw)
        # add any objects that can be extracted
        if name and value is not None:
            res[name] = value
    # Grab interesting values not in the ReadView
    pif = pif_obj.as_dictionary()
    elements = {}
    if pif.get("composition"):
        for comp in pif["composition"]:
            if comp.get("actualAtomicPercent"):
                elements[comp["element"]] = float(comp["actualAtomicPercent"]["value"])
            elif comp.get("actualWeightPercent"):
                elements[comp["element"]] = float(comp["actualWeightPercent"]["value"])
    if elements:
        res["elemental_percent"] = elements
    elif pif.get("chemicalFormula"):
        def _store(sym, count):
            # Store as int when possible, float otherwise; a bare symbol
            # (no trailing number) counts as 1.
            try:
                elements[sym] = int(count)
            except ValueError:
                elements[sym] = float(count) if count else 1

        symbol = ""
        num = ""
        # Chemical formulae are comprised of letters, numbers, and
        # potentially characters we don't care about.
        for char in pif["chemicalFormula"]:
            # Uppercase char indicates beginning of new symbol.
            if char.isupper():
                # If there is already a symbol in holding, process it.
                if symbol:
                    _store(symbol, num)
                    symbol = ""
                    num = ""
                symbol += char
            # Lowercase chars are continuations of a symbol.
            elif char.islower():
                symbol += char
            # Digits and decimal points build up the count.
            elif char.isdigit() or char == ".":
                num += char
            # All other chars are not useful.
        # BUGFIX: flush the final symbol, which the original loop dropped
        # (e.g. "H2O" previously yielded {'H': 2} with no 'O' entry).
        if symbol:
            _store(symbol, num)
        if elements:
            res["elemental_proportion"] = elements
    return res
def replace_node_status(self, name, body, **kwargs):
    """replace status of the specified Node

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True

    >>> thread = api.replace_node_status(name, body, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the Node (required)
    :param V1Node body: (required)
    :param str pretty: If 'true', then the output is pretty printed.
    :param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
    :param str field_manager: fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.
    :return: V1Node
             If the method is called asynchronously,
             returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the async path (which yields the request thread) and the sync
    # path (which yields the deserialized V1Node) delegate to the same
    # helper, so a single call suffices.
    return self.replace_node_status_with_http_info(name, body, **kwargs)
def get_thumbnail_name(self, thumbnail_options, transparent=False, high_resolution=False):
    """A version of ``Thumbnailer.get_thumbnail_name`` that returns the original
    filename to resize.

    The extra parameters are accepted for interface compatibility but do
    not influence the returned path.
    """
    directory, base_filename = os.path.split(self.name)
    return os.path.join(self.thumbnail_basedir, directory, self.thumbnail_subdir, base_filename)
def from_parmed(cls, path, *args, **kwargs):
    """Try to load a file automatically with ParmEd. Not guaranteed to work, but
    might be useful if it succeeds.

    Arguments
    ---------
    path : str
        Path to file that ParmEd can load
    """
    st = parmed.load_file(path, structure=True, *args, **kwargs)
    # NOTE(review): these pops happen *after* kwargs was already forwarded
    # to load_file above, so an explicit box/velocities/positions keyword
    # is seen by both calls -- confirm this is intended.
    box = kwargs.pop('box', getattr(st, 'box', None))
    velocities = kwargs.pop('velocities', getattr(st, 'velocities', None))
    positions = kwargs.pop('positions', getattr(st, 'positions', None))
    return cls(master=st, topology=st.topology, positions=positions, box=box, velocities=velocities, path=path, **kwargs)
def get_library_instance(self, library_path, library_name):
    """Generate a Library instance from within libraries dictionary tree.

    Returns a new LibraryState for the given path/name, or implicitly
    returns None (with a warning) when the library is not mounted.
    """
    if self.is_library_in_libraries(library_path, library_name):
        # Imported here rather than at module level, presumably to avoid a
        # circular import -- TODO confirm.
        from rafcon.core.states.library_state import LibraryState
        return LibraryState(library_path, library_name, "0.1")
    else:
        logger.warning("Library manager will not create a library instance which is not in the mounted libraries.")
def extend(self, *bindings):
    """Append the given bindings to this keymap.

    Arguments:
        *bindings (Binding): Bindings to be added.

    Returns:
        Keymap: self
    """
    prepared = self._preprocess(bindings)
    self._bindings.extend(prepared)
    # Fluent interface: allow chained calls.
    return self
def _parse_volume_descriptors(self):
    # type: () -> None
    '''An internal method to parse the volume descriptors on an ISO.

    Reads consecutive 2048-byte descriptors starting at logical sector 16,
    dispatching each to the appropriate parser (PVD, SVD, VDST, Boot
    Record, or UDF bridge descriptors) until a non-descriptor sector is
    encountered.

    Parameters:
     None.
    Returns:
     Nothing.
    '''
    # Ecma-119 says that the Volume Descriptor set is a sequence of volume
    # descriptors recorded in consecutively numbered Logical Sectors
    # starting with Logical Sector Number 16. Since sectors are 2048 bytes
    # in length, we start at sector 16 * 2048
    # Ecma-119, 6.2.1 says that the Volume Space is divided into a System
    # Area and a Data Area, where the System Area is in logical sectors 0
    # to 15, and whose contents is not specified by the standard.
    self._cdfp.seek(16 * 2048)
    while True:
        # All volume descriptors are exactly 2048 bytes long
        curr_extent = self._cdfp.tell() // 2048
        vd = self._cdfp.read(2048)
        if len(vd) != 2048:
            raise pycdlibexception.PyCdlibInvalidISO('Failed to read entire volume descriptor')
        # First byte is the descriptor type, next five the identifier.
        (desc_type, ident) = struct.unpack_from('=B5s', vd, 0)
        if desc_type not in (headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY, headervd.VOLUME_DESCRIPTOR_TYPE_SET_TERMINATOR, headervd.VOLUME_DESCRIPTOR_TYPE_BOOT_RECORD, headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY) or ident not in (b'CD001', b'BEA01', b'NSR02', b'TEA01'):
            # We read the next extent, and it wasn't a descriptor. Abort
            # the loop, remembering to back up the input file descriptor.
            self._cdfp.seek(-2048, os.SEEK_CUR)
            break
        if desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY:
            pvd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_PRIMARY)
            pvd.parse(vd, curr_extent)
            self.pvds.append(pvd)
        elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_SET_TERMINATOR:
            vdst = headervd.VolumeDescriptorSetTerminator()
            vdst.parse(vd, curr_extent)
            self.vdsts.append(vdst)
        elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_BOOT_RECORD:
            # Both an Ecma-119 Boot Record and a Ecma-TR 071 UDF-Bridge
            # Beginning Extended Area Descriptor have the first byte as 0,
            # so we can't tell which it is until we look at the next 5
            # bytes (Boot Record will have 'CD001', BEAD will have 'BEA01').
            if ident == b'CD001':
                br = headervd.BootRecord()
                br.parse(vd, curr_extent)
                self.brs.append(br)
            elif ident == b'BEA01':
                self._has_udf = True
                self.udf_bea.parse(vd, curr_extent)
            elif ident == b'NSR02':
                self.udf_nsr.parse(vd, curr_extent)
            elif ident == b'TEA01':
                self.udf_tea.parse(vd, curr_extent)
            else:
                # This isn't really possible, since we would have aborted
                # the loop above.
                raise pycdlibexception.PyCdlibInvalidISO('Invalid volume identification type')
        elif desc_type == headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY:
            svd = headervd.PrimaryOrSupplementaryVD(headervd.VOLUME_DESCRIPTOR_TYPE_SUPPLEMENTARY)
            svd.parse(vd, curr_extent)
            self.svds.append(svd)
        # Since we checked for the valid descriptors above, it is impossible
        # to see an invalid desc_type here, so no check necessary.
    # The language in Ecma-119, p.8, Section 6.7.1 says:
    # The sequence shall contain one Primary Volume Descriptor (see 8.4) recorded at least once.
    # The important bit there is "at least one", which means that we have
    # to accept ISOs with more than one PVD.
    if not self.pvds:
        raise pycdlibexception.PyCdlibInvalidISO('Valid ISO9660 filesystems must have at least one PVD')
    self.pvd = self.pvds[0]
    # Make sure any other PVDs agree with the first one.
    for pvd in self.pvds[1:]:
        if pvd != self.pvd:
            raise pycdlibexception.PyCdlibInvalidISO('Multiple occurrences of PVD did not agree!')
        pvd.root_dir_record = self.pvd.root_dir_record
    if not self.vdsts:
        raise pycdlibexception.PyCdlibInvalidISO('Valid ISO9660 filesystems must have at least one Volume Descriptor Set Terminator')
def open(self):
    """Open an existing database table.

    Switches the instance to "open" mode and loads the table's column
    metadata from the backing store.

    Returns:
        self, so the call can be chained.

    Raises:
        IOError: if no table named ``self.name`` exists.
    """
    if self._table_exists():
        self.mode = "open"
        # get table info
        self._get_table_info()
        return self
    # table not found.  NOTE: the original used the Python-2-only
    # ``raise IOError, msg`` statement form, which is a SyntaxError on
    # Python 3; the call form below behaves identically on both.
    raise IOError("Table %s doesn't exist" % self.name)
def get_definition(self):
    """Add a DEFINITION tag to the XMLBIF network for every CPD.

    Each DEFINITION element carries a FOR tag naming the variable and
    one GIVEN tag per conditioning variable (sorted alphabetically).

    Return
    ------
    dict : maps each variable name to its DEFINITION etree element.
    """
    cpds = self.model.get_cpds()
    cpds.sort(key=lambda cpd: cpd.variable)
    tags = {}
    for cpd in cpds:
        node = etree.SubElement(self.network, "DEFINITION")
        etree.SubElement(node, "FOR").text = cpd.variable
        # Evidence variables: everything after the first entry of
        # cpd.variables, reversed, then emitted in sorted order.
        for evidence in sorted(cpd.variables[:0:-1]):
            etree.SubElement(node, "GIVEN").text = evidence
        tags[cpd.variable] = node
    return tags
def _sector_erase_program_double_buffer(self, progress_cb=_stub_progress):
    """! @brief Double-buffered program by performing sector erases.

    First erases every sector that contains at least one changed page,
    then programs the changed pages using two RAM buffers: the next
    page's data is loaded into the idle buffer while the previous page
    is still being programmed.

    @param progress_cb Callable taking a float in [0.0, 1.0].
    @return FlashBuilder.FLASH_SECTOR_ERASE (the strategy used).
    @exception FlashProgramFailure A page program reported nonzero status.
    """
    actual_sector_erase_count = 0
    actual_sector_erase_weight = 0
    progress = 0

    progress_cb(0.0)

    # Fill in same flag for all pages. This is done up front so we're not trying
    # to read from flash while simultaneously programming it.
    progress = self._scan_pages_for_same(progress_cb)

    # Erase all sectors up front.
    self.flash.init(self.flash.Operation.ERASE)
    for sector in self.sector_list:
        if sector.are_any_pages_not_same():
            # Erase the sector
            self.flash.erase_sector(sector.addr)

            # Update progress
            progress += sector.erase_weight
            if self.sector_erase_weight > 0:
                progress_cb(float(progress) / float(self.sector_erase_weight))
    self.flash.uninit()

    # Set up page and buffer info.
    current_buf = 0
    next_buf = 1
    page, i = self._next_nonsame_page(0)

    # Make sure there are actually pages to program differently from current flash contents.
    if page is not None:
        self.flash.init(self.flash.Operation.PROGRAM)

        # Load first page buffer
        self.flash.load_page_buffer(current_buf, page.addr, page.data)

        while page is not None:
            assert page.same is not None

            # Kick off this page program.
            current_addr = page.addr
            current_weight = page.get_program_weight()
            self.flash.start_program_page_with_buffer(current_buf, current_addr)
            actual_sector_erase_count += 1
            actual_sector_erase_weight += page.get_program_weight()

            # Get next page and load it into the idle buffer while the
            # current program operation is still running.
            page, i = self._next_nonsame_page(i)
            if page is not None:
                self.flash.load_page_buffer(next_buf, page.addr, page.data)

            # Wait for the program to complete.
            result = self.flash.wait_for_completion()
            if result != 0:
                raise FlashProgramFailure('program_page(0x%x) error: %i' % (current_addr, result), current_addr, result)

            # Swap buffers.
            current_buf, next_buf = next_buf, current_buf

            # Update progress
            progress += current_weight
            if self.sector_erase_weight > 0:
                progress_cb(float(progress) / float(self.sector_erase_weight))

        self.flash.uninit()

    progress_cb(1.0)

    # NOTE(review): the "estimated" line logs self.sector_erase_count,
    # presumably computed during analysis elsewhere -- confirm it is set
    # before this strategy runs.
    LOG.debug("Estimated sector erase programmed page count: %i", self.sector_erase_count)
    LOG.debug("Actual sector erase programmed page count: %i", actual_sector_erase_count)
    return FlashBuilder.FLASH_SECTOR_ERASE
def get_orders(self, address, chain_name='NEO', contract_version='V2', pair=None, from_epoch_time=None, order_status=None, before_id=None, limit=50):
    """Fetch the order history of the given address.

    Example::

        get_orders(address=neo_get_scripthash_from_address(address=address))

    Returns a list of order dicts (id, blockchain, contract_hash, side,
    offer/want asset ids and amounts, status, fills, makes, ...).

    :param address: ScriptHash of the address to filter orders for.
    :type address: str
    :param chain_name: Name of the chain to find orders against.
    :type chain_name: str
    :param contract_version: Version of the contract to find orders against.
    :type contract_version: str
    :param pair: Optional trading pair to filter order requests on.
    :type pair: str
    :param from_epoch_time: Only return orders last updated at or after this time.
    :type from_epoch_time: int
    :param order_status: Only return orders with this status (open, cancelled, completed).
    :type order_status: str
    :param before_id: Only return orders created before the order with this id.
    :type before_id: str
    :param limit: Maximum number of orders to return (min: 1, max: 200, default: 50).
    :type limit: int
    :return: List of dictionaries containing the orders for the given
        NEO address and (optional) trading pair.
    """
    contract_hash = self.get_contracts()[chain_name.upper()][contract_version.upper()]
    api_params = {"address": address, "contract_hash": contract_hash, "limit": limit}
    # Optional filters are only sent when explicitly provided.
    optional = {
        'pair': pair,
        'from_epoch_time': from_epoch_time,
        'order_status': order_status,
        'before_id': before_id,
    }
    api_params.update({key: value for key, value in optional.items() if value is not None})
    return self.request.get(path='/orders', params=api_params)
def _printedby_data(self, ws):
    """Return a dict describing the user who prints the worksheet.

    Keys: username, fullname, email; plus signature (a URL) when the
    matching LabContact has a signature image.
    """
    data = {}
    member = self.context.portal_membership.getAuthenticatedMember()
    if not member:
        return data
    username = member.getUserName()
    data['username'] = username
    data['fullname'] = to_utf8(self.user_fullname(username))
    data['email'] = to_utf8(self.user_email(username))
    # Find the LabContact whose login matches the authenticated user.
    contacts = [brain for brain in self.bika_setup_catalog(portal_type='LabContact')
                if brain.getObject().getUsername() == username]
    if contacts:
        signature = contacts[0].getObject().getSignature()
        if signature:
            data['signature'] = signature.absolute_url() + "/Signature"
    return data
def get_document(self, doc_url, force_download=False):
    """Retrieve the data for the given document, preferring the cache.

    :type doc_url: String or Document
    :param doc_url: the URL of the document, or a Document object
    :type force_download: Boolean
    :param force_download: True to download from the server regardless
        of the cache's contents
    :rtype: String
    :returns: the document data
    :raises: APIError if the API request is not successful
    """
    doc_url = str(doc_url)
    serve_from_cache = (self.use_cache
                        and not force_download
                        and self.cache.has_document(doc_url))
    if serve_from_cache:
        return self.cache.get_document(doc_url)
    doc_data = self.api_request(doc_url, raw=True)
    if self.update_cache:
        self.cache.add_document(doc_url, doc_data)
    return doc_data
def addStampAnnot(self, rect, stamp=0):
    """Add a 'rubber stamp' annotation inside a rectangle."""
    CheckParent(self)
    annot = _fitz.Page_addStampAnnot(self, rect, stamp)
    if annot:
        annot.thisown = True
        # Only keep a weak link back to the page to avoid a ref cycle.
        annot.parent = weakref.proxy(self)
        self._annot_refs[id(annot)] = annot
        return annot
    return
def sendline(self, text):
    """Send one input line (text plus os.linesep) to the running program.

    Args:
        text (str): the input text to be sent.

    Raises:
        TerminationException: the program terminated around the send.
        TimeoutException: the send timed out.
        NestedException: any other internal problem while sending.
    """
    logger.debug(f"Sending input '{text}' to '{self.name}'")
    try:
        return self._spawn.sendline(text)
    except pexpect.exceptions.EOF as err:
        logger.debug("Raising termination exception.")
        raise TerminationException(instance=self, real_exception=err, output=self.get_output())
    except pexpect.exceptions.TIMEOUT as err:
        logger.debug("Raising timeout exception.")
        raise TimeoutException(instance=self, real_exception=err, output=self.get_output())
    except Exception as err:
        logger.debug(f"Sending input failed: {err}")
        raise NestedException(instance=self, real_exception=err, output=self.get_output())
def fit(self, data, debug=False):
    """Fit each of the models in the group on its segment of the data.

    Parameters
    ----------
    data : pandas.DataFrame
        Must have a column with the same name as `segmentation_col`.
    debug : bool
        If True (default False), passed through to model estimation.

    Returns
    -------
    fits : dict of statsmodels.regression.linear_model.OLSResults
        Keys are the segment names.
    """
    with log_start_finish('fitting models in group {}'.format(self.name), logger):
        fits = {}
        for segment_name, segment_df in self._iter_groups(data):
            fits[segment_name] = self.models[segment_name].fit(segment_df, debug=debug)
        return fits
def _get_available(recommended=False, restart=False):
    '''Utility function to list available update packages.

    Returns {'updatename': 'version', ...}; with ``recommended`` only
    updates flagged [recommended] are kept, and with ``restart`` only
    updates whose entry mentions a restart.
    '''
    out = salt.utils.mac_utils.execute_return_result(['softwareupdate', '--list'])

    # The regexes parse lines that look like the following:
    #    * Safari6.1.2MountainLion-6.1.2
    #         Safari (6.1.2), 51679K [recommended]
    #    - iCal-1.0.2
    #         iCal, 1.0.2, 6520K
    if salt.utils.data.is_true(recommended):
        # Restrict to '*' (recommended) entries only.
        rexp = re.compile('(?m)^ [*] ' r'([^ ].*)[\r\n].*\(([^\)]+)')
    else:
        rexp = re.compile('(?m)^ [*|-] ' r'([^ ].*)[\r\n].*\(([^\)]+)')

    ret = {name: version_num for name, version_num in rexp.findall(out)}

    if not salt.utils.data.is_true(restart):
        return ret

    # Keep only updates whose entry mentions 'restart', e.g.:
    #    * Safari6.1.2MountainLion-6.1.2
    #         Safari (6.1.2), 51679K [recommended] [restart]
    rexp1 = re.compile('(?m)^ [*|-] ' r'([^ ].*)[\r\n].*restart*')
    restart_updates = rexp1.findall(out)
    return {name: version_num for name, version_num in ret.items()
            if name in restart_updates}
def get_distributed_seismicity_source_nodes(source):
    """Return the attribute nodes common to distributed seismicity sources.

    :param source:
        Seismic source as instance of
        :class:`openquake.hazardlib.source.area.AreaSource` or
        :class:`openquake.hazardlib.source.point.PointSource`
    :returns:
        List of instances of :class:`openquake.baselib.node.Node`
    """
    return [
        # Magnitude scaling relationship (class name only)
        Node("magScaleRel", text=source.magnitude_scaling_relationship.__class__.__name__),
        # Rupture aspect ratio
        Node("ruptAspectRatio", text=source.rupture_aspect_ratio),
        # Magnitude-frequency distribution
        obj_to_node(source.mfd),
        # Nodal plane distribution
        build_nodal_plane_dist(source.nodal_plane_distribution),
        # Hypocentral depth distribution
        build_hypo_depth_dist(source.hypocenter_distribution),
    ]
def loop_through_agency(self):
    """Resolve every table/column definition link for this agency.

    Reads ``<agency>.txt`` -- a dict literal mapping table -> column ->
    definition link -- replaces each link with the definition returned
    by ``self.grab_definition``, and writes the result as JSON to
    ``<agency>_values.json``.
    """
    import ast

    agency = self.agency
    with open(agency + '.txt') as f:
        # SECURITY: the original used eval() here, which executes
        # arbitrary code from the data file. ast.literal_eval parses the
        # same dict literals but cannot run code.
        data = ast.literal_eval(f.read())
    for table in data:
        for column in data[table]:
            value_link = data[table][column]
            data[table][column] = self.grab_definition(value_link)
    with open(agency + '_values.json', 'w') as f:
        # json.dumps already returns a str; no extra str() needed.
        f.write(json.dumps(data))
def patched_packing_env(env):
    """Monkey patch packaging.markers.default_environment.

    Temporarily replaces the vendored ``default_environment`` with a
    function returning ``env``; the original is always restored on exit.
    """
    # Save the original through the SAME attribute path that is patched
    # below. The original code read it from pkg_resources.packaging but
    # wrote to pkg_resources._vendor.packaging, which breaks the restore
    # whenever those two names are not aliases of one module.
    old_env = pkg_resources._vendor.packaging.markers.default_environment

    def new_env():
        return env

    pkg_resources._vendor.packaging.markers.default_environment = new_env
    try:
        yield
    finally:
        pkg_resources._vendor.packaging.markers.default_environment = old_env
def override_level_names(self, mapping):
    """Rename known level names in place.

    :param mapping: Maps existing level names to their new names.
        Non-dict values are silently ignored, as are keys that are not
        current level names.
    :type mapping: dict
    """
    if not isinstance(mapping, dict):
        return
    # Only rename levels that actually exist.
    for level in mapping.keys() & self._level_names.keys():
        self._level_names[level] = mapping[level]
def create(self, name, *args, **kwargs):
    """Standard task creation, preceded by a container existence check.

    When a 'cont' kwarg is supplied, verify the container exists in
    object storage first; a missing container raises NoSuchContainer.
    """
    container = kwargs.get("cont")
    if container:
        # get_container() raises NoSuchContainer if it does not exist.
        region = self.api.region_name
        swift_client = self.api.identity.object_store[region].client
        swift_client.get_container(container)
    return super(ImageTasksManager, self).create(name, *args, **kwargs)
def create_package(self, dirpath):
    """Set up a package directory by writing its __coconut__.py header."""
    target = os.path.join(fixpath(dirpath), "__coconut__.py")
    with openfile(target, "w") as handle:
        writefile(handle, self.comp.getheader("__coconut__"))
def sbessely(x, N):
    """Return the spherical Bessel functions of the second kind y_n(x).

    Seeds the closed forms of y_0 and y_1, then applies the upward
    recurrence  y_n = ((2n - 1) / x) * y_{n-1} - y_{n-2}.

    :param x: the (nonzero, scalar) argument.
    :param N: values of n will run from 0 to N - 1.
    :returns: numpy float64 array of length N.
    """
    out = np.zeros(N, dtype=np.float64)
    # Guard the seeds so N < 2 no longer raises IndexError.
    if N > 0:
        out[0] = -np.cos(x) / x
    if N > 1:
        out[1] = -np.cos(x) / (x ** 2) - np.sin(x) / x
    # range() replaces the Python-2-only xrange(); same iteration.
    for n in range(2, N):
        out[n] = ((2.0 * n - 1.0) / x) * out[n - 1] - out[n - 2]
    return out
def _stream(self):
    """Return a generator over the content instead of a list of lines.

    Yields either the pre-loaded content (self._content) or, when none
    is cached, the stream obtained by connecting through self.ctx. Any
    failure is remembered in self._exception so later calls re-raise
    immediately, and is surfaced to the caller as a ContentException.
    """
    # A previous attempt already failed; fail fast with the same error.
    if self._exception:
        raise self._exception
    try:
        if self._content:
            yield self._content
        else:
            args = self.create_args()
            # The connection stays open only while the consumer iterates
            # inside this 'with' block.
            with self.ctx.connect(*args, env=self.create_env(), timeout=self.timeout) as s:
                yield s
    except StopIteration:
        # Propagate generator-protocol control flow untouched; only real
        # errors should be recorded and wrapped below.
        raise
    except Exception as ex:
        self._exception = ex
        raise ContentException(str(ex))
def _ibis_sqlite_regex_extract ( string , pattern , index ) :
"""Extract match of regular expression ` pattern ` from ` string ` at ` index ` .
Parameters
string : str
pattern : str
index : int
Returns
result : str or None""" | result = re . search ( pattern , string )
if result is not None and 0 <= index <= ( result . lastindex or - 1 ) :
return result . group ( index )
return None |
def transaction(self, session=None):
    """Yield a session for a transaction block.

    If ``session`` is not passed, a new scoped session is created for
    the duration of the block; an explicitly passed session is used
    as-is and left for the caller to manage.
    """
    owns_session = session is None
    if owns_session:
        session = self.create_scoped_session()
    try:
        yield session
    finally:
        if owns_session:
            # Deliberately NOT closing the locally created session; see
            # http://groups.google.com/group/sqlalchemy/browse_thread/thread/7c1eb642435adde7
            # To work around that sqlalchemy issue we could (1) pass the
            # session explicitly or (2) never close it (risking leaks).
            # Until pypm implements atomic transactions in
            # client.installer, option (2) is retained.
            pass
def restore_package_version_from_recycle_bin(self, package_version_details, feed, group_id, artifact_id, version):
    """RestorePackageVersionFromRecycleBin.

    [Preview API] Restore a package version from the recycle bin to its
    associated feed.

    :param :class:`<MavenRecycleBinPackageVersionDetails> <azure.devops.v5_1.maven.models.MavenRecycleBinPackageVersionDetails>` package_version_details: Set the 'Deleted' property to false to restore the package.
    :param str feed: Name or ID of the feed.
    :param str group_id: Group ID of the package.
    :param str artifact_id: Artifact ID of the package.
    :param str version: Version of the package.
    """
    route_values = {}
    # (URL placeholder, serializer parameter name, value) -- only values
    # that were actually supplied become part of the route.
    for placeholder, param_name, value in (
            ('feed', 'feed', feed),
            ('groupId', 'group_id', group_id),
            ('artifactId', 'artifact_id', artifact_id),
            ('version', 'version', version)):
        if value is not None:
            route_values[placeholder] = self._serialize.url(param_name, value, 'str')
    content = self._serialize.body(package_version_details, 'MavenRecycleBinPackageVersionDetails')
    self._send(http_method='PATCH',
               location_id='f67e10eb-1254-4953-add7-d49b83a16c9f',
               version='5.1-preview.1',
               route_values=route_values,
               content=content)
def reset_api_secret(context, id, etag):
    """reset_api_secret(context, id, etag)

    Reset a Feeder api_secret and print the result.

    >>> dcictl feeder-reset-api-secret [OPTIONS]

    :param string id: ID of the feeder [required]
    :param string etag: Entity tag of the feeder resource [required]
    """
    secret = feeder.reset_api_secret(context, id=id, etag=etag)
    utils.format_output(secret, context.format,
                        headers=['id', 'api_secret', 'etag'])
def get_modified_files():
    """Return the staged (cached) files as a list of byte strings."""
    proc = subprocess.Popen(
        ["git", "diff-index", "--cached", "--name-only", "HEAD"],
        stdout=subprocess.PIPE)
    stdout, _ = proc.communicate()
    return stdout.splitlines()
def _initialize_random_state(self, seed=None, shared=True, name=None):
    """Initialize the random state; call from subclass constructors.

    If seed is None, there is no control over the random stream (no
    reproducibility of the stream). If shared is True (and the object is
    not time-dependent), the random state is shared across all objects
    of the given class; this can be overridden per object by assigning
    new random state to the random_generator parameter.

    :param seed: optional integer seed for a reproducible stream.
    :param shared: when False this object gets its own generator and a
        seed-suffixed hash name.
    :param name: optional explicit name keying the internal hash;
        defaults to self.name.
    """
    if seed is None:
        # Equivalent to an uncontrolled seed.
        seed = random.Random().randint(0, 1000000)
        suffix = ''
    else:
        suffix = str(seed)
    # If time_dependent, independent state required: otherwise
    # time-dependent seeding (via hash) will affect shared
    # state. Note that if all objects have time_dependent=True
    # shared random state is safe and more memory efficient.
    if self.time_dependent or not shared:
        self.random_generator = type(self.random_generator)(seed)
    # Seed appropriately (if not shared)
    if not shared:
        self.random_generator.seed(seed)
    if name is None:
        self._verify_constrained_hash()
    hash_name = name if name else self.name
    # Per-object hash: make the hash name unique to this seed.
    if not shared:
        hash_name += suffix
    self._hashfn = Hash(hash_name, input_count=2)
    # Time-dependent objects immediately derive a seed from the hash.
    if self.time_dependent:
        self._hash_and_seed()
def connect(self, callback, weak=False):
    """Register ``callback`` to be invoked on every ``emit``.

    :param callback: The callback to connect.
    :param weak: If `True`, only hold a weak reference to the callback;
        when it gets garbage-collected it is automatically
        unregistered from the signal, which helps avoid circular
        references in user code.

    .. warning::
        Beware of bound methods! They are generally short-lived and do
        not play nicely with weak references.

    .. note::
        Connecting the same callback twice or more makes it fire that
        many times per `emit`; call `disconnect` as many times as
        `connect` was called to fully unregister it.
    """
    entry = ref(callback, self._disconnect) if weak else callback
    with self._write_lock, self._read_lock:
        self._callbacks.append(entry)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.