signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def _config(self):
    """Build the value to be written to the device's config register."""
    register = 0
    if self.mode == MODE_NORMAL:
        # Standby-time field lives at bit offset 5.
        register += (self._t_standby << 5)
    if self._iir_filter:
        # IIR-filter field lives at bit offset 2.
        register += (self._iir_filter << 2)
    return register
def daterange_with_details(value):
    '''Display a date range in the shorter possible maner.'''
    delta = value.end - value.start
    start = None
    end = None
    if is_first_year_day(value.start) and is_last_year_day(value.end):
        # Whole calendar years: render bare year numbers.
        start = value.start.year
        if delta.days > 365:
            end = value.end.year
    elif is_first_month_day(value.start) and is_last_month_day(value.end):
        # Whole calendar months: render short month labels.
        start = short_month(value.start)
        if delta.days > 31:
            end = short_month(value.end)
    else:
        # Arbitrary days: render short day labels.
        start = short_day(value.start)
        if value.start != value.end:
            end = short_day(value.end)
    if end:
        return _('%(start)s to %(end)s', start=start, end=end)
    return start
def _append_pairs(new_pairs):
    """Load the pairlist, add new stuff, save it out"""
    queue = restore_pairs() or []
    queue.extend(new_pairs)
    print("Adding {} new pairs, queue has {} pairs".format(len(new_pairs), len(queue)))
    save_pairs(queue)
def overall(self, node):
    """Calculate overall size of the node including children and empty space"""
    total = 0
    for child in self.children(node):
        total += self.value(child, node)
    return total
def get_residue_annotations(self, seq_resnum, seqprop=None, structprop=None,
                            chain_id=None, use_representatives=False):
    """Get all residue-level annotations stored in the SeqProp ``letter_annotations`` field for a given residue number.

    Uses the representative sequence, structure, and chain ID stored by default. If other properties
    from other structures are desired, input the proper IDs. An alignment for the given sequence to
    the structure must be present in the sequence_alignments list.

    Args:
        seq_resnum (int): Residue number in the sequence
        seqprop (SeqProp): SeqProp object
        structprop (StructProp): StructProp object
        chain_id (str): ID of the structure's chain to get annotation from
        use_representatives (bool): If the representative sequence/structure/chain IDs should be used

    Returns:
        dict: All available letter_annotations for this residue number
    """
    # Validate ID arguments against the use_representatives flag, then
    # fill in the representative IDs if requested.
    if use_representatives:
        if seqprop and structprop and chain_id:
            raise ValueError('Overriding sequence, structure, and chain IDs with representatives. '
                             'Set use_representatives to False if custom IDs are to be used.')
        seqprop = self.representative_sequence
        structprop = self.representative_structure
        chain_id = self.representative_chain
    else:
        if not seqprop or not structprop or not chain_id:
            raise ValueError('Input sequence, structure, and chain to map between, or set use_representatives '
                             'to True.')
    # Extract the single-residue slice of the sequence as a SeqFeature.
    residue_feature = SeqFeature(FeatureLocation(seq_resnum - 1, seq_resnum))
    seq_slice = residue_feature.extract(seqprop)
    # Flatten the per-letter annotations into the returned dict.
    all_info = ssbio.utils.clean_single_dict(indict=seq_slice.letter_annotations,
                                             prepend_to_keys='seq_',
                                             remove_keys_containing='_chain_index')
    all_info['seq_resnum'] = seq_resnum
    all_info['seq_residue'] = str(seq_slice.seq)
    if structprop:
        chain = structprop.chains.get_by_id(chain_id)
        # Map the sequence residue number onto structure numbering.
        resnum_map = self.map_seqprop_resnums_to_structprop_resnums(
            resnums=seq_resnum, seqprop=seqprop, structprop=structprop,
            chain_id=chain_id, use_representatives=use_representatives)
        # Only add structure info when the residue actually maps into the structure.
        if residue_feature.location.end.position in resnum_map:
            struct_resnum = resnum_map[residue_feature.location.end.position]
            struct_feature = SeqFeature(FeatureLocation(struct_resnum - 1, struct_resnum))
            struct_slice = struct_feature.extract(chain.seq_record)
            struct_info = ssbio.utils.clean_single_dict(indict=struct_slice.letter_annotations,
                                                        prepend_to_keys='struct_',
                                                        remove_keys_containing='structure_resnums')
            struct_info['struct_resnum'] = struct_resnum
            struct_info['struct_residue'] = str(struct_slice.seq)
            all_info.update(struct_info)
            # Warn if the structure residue differs from the sequence residue.
            if seq_slice.seq != struct_slice.seq:
                log.warning('Sequence residue ({}{}) does not match structure residue ({}{}). '
                            'This may simply be due to differences in the structure'.format(
                                seq_slice.seq, seq_resnum, struct_slice.seq, struct_resnum))
    return all_info
def post(self, route, data=None, params=None, follow_redirects=True):
    """Send POST request to app.

    :param route: route to send request to
    :type route: str
    :param data: form data to include in request
    :type data: dict
    :param params: URL parameters to include in request
    :param follow_redirects: enable redirection (defaults to ``True``)
    :type follow_redirects: bool
    :raises check50.Failure: if Flask application throws an uncaught exception

    Example usage::

        check50.flask.app("application.py").post("/buy", data={"symbol": "GOOG", "shares": 10}).status(200)
    """
    # Delegate to the generic request sender with the POST verb.
    return self._send("POST", route, data, params, follow_redirects=follow_redirects)
def toggle_wrap_mode(self, checked):
    """Toggle wrap mode"""
    # No tab widget yet: nothing to toggle.
    if self.tabwidget is None:
        return
    for ed in self.editors:
        ed.toggle_wrap_mode(checked)
    # Persist the choice.
    self.set_option('wrap', checked)
def alter_change_column(self, table, column_name, field):
    """Support change columns."""
    # Delegate to the generic implementation, then splice the PostgreSQL
    # "TYPE " keyword into the generated SQL just before the final token.
    ctx = super(PostgresqlMigrator, self).alter_change_column(table, column_name, field)
    ctx._sql.insert(-1, 'TYPE')
    ctx._sql.insert(-1, ' ')
    return ctx
def PushBack(self, string='', **unused_kwargs):
    """Push the match back on the stream.

    Prepends ``string`` to the unprocessed buffer and removes the same
    number of characters from the end of the processed buffer.

    Args:
        string: optional data.
    """
    if not string:
        # BUG FIX: with an empty string, -len('') == 0 so the slice
        # [:-0] == [:0] would wrongly wipe the whole processed buffer.
        return
    self.buffer = string + self.buffer
    self.processed_buffer = self.processed_buffer[:-len(string)]
def write_Track(file, track, bpm=120, repeat=0, verbose=False):
    """Write a mingus.Track to a MIDI file.

    Write the name to the file and set the instrument if the instrument has
    the attribute instrument_nr, which represents the MIDI instrument
    number. The class MidiInstrument in mingus.containers.Instrument has
    this attribute by default.
    """
    midi = MidiFile()
    midi_track = MidiTrack(bpm)
    midi.tracks = [midi_track]
    # Play the track once, plus `repeat` extra times.
    for _ in range(repeat + 1):
        midi_track.play_Track(track)
    return midi.write_file(file, verbose)
def _get_column_value ( self , obj , column ) :
"""Return a single cell ' s value
: param obj obj : The instance we manage
: param dict column : The column description dictionnary
: returns : The associated value""" | return self . _get_formatted_val ( obj , column [ '__col__' ] . key , column ) |
def _move_modules(self, temp_repo, destination):
    """Move odoo modules from the temp directory to the destination.

    This step is different from a standard repository. In the base code
    of Odoo, the modules are contained in a addons folder at the root
    of the git repository. However, when deploying the application,
    those modules are placed inside the folder odoo/addons.

    1 - Move modules from addons/ to odoo/addons/ (with the base module).
    2 - Move the whole odoo folder to the destination location.
    """
    addons_dir = os.path.join(temp_repo, 'addons')
    odoo_addons_dir = os.path.join(temp_repo, 'odoo/addons')
    # Step 1: relocate each module next to the base modules.
    for module_dir in self._get_module_folders(addons_dir):
        force_move(module_dir, odoo_addons_dir)
    # Step 2: move the merged odoo folder into place.
    force_move(os.path.join(temp_repo, 'odoo'), destination)
def temporary_path(self):
    """A context manager that enables a reasonably short, general and
    magic-less way to solve the :ref:`AtomicWrites`.

    * On *entering*, it will create the parent directories so the
      temporary_path is writeable right away.
      This step uses :py:meth:`FileSystem.mkdir`.
    * On *exiting*, it will move the temporary file if there was no exception thrown.
      This step uses :py:meth:`FileSystem.rename_dont_move`

    The file system operations will be carried out by calling them on :py:attr:`fs`.

    The typical use case looks like this:

    .. code:: python

        class MyTask(luigi.Task):
            def output(self):
                return MyFileSystemTarget(...)

            def run(self):
                with self.output().temporary_path() as self.temp_output_path:
                    run_some_external_command(output_path=self.temp_output_path)
    """
    # BUG FIX: use an int bound -- random.randrange() rejects float
    # arguments (the original 1e10) on Python >= 3.12.
    num = random.randrange(0, 10 ** 10)
    slashless_path = self.path.rstrip('/').rstrip("\\")
    _temp_path = '{}-luigi-tmp-{:010}{}'.format(slashless_path, num, self._trailing_slash())
    # TODO: os.path doesn't make sense here as it's os-dependent
    tmp_dir = os.path.dirname(slashless_path)
    if tmp_dir:
        self.fs.mkdir(tmp_dir, parents=True, raise_if_exists=False)
    yield _temp_path
    # We won't reach here if there was an user exception.
    self.fs.rename_dont_move(_temp_path, self.path)
def convex_hull_collide(nodes1, nodes2):
    """Determine if the convex hulls of two curves collide.

    .. note::
        This is a helper for :func:`from_linearized`.

    Args:
        nodes1 (numpy.ndarray): Control points of a first curve.
        nodes2 (numpy.ndarray): Control points of a second curve.

    Returns:
        bool: Indicating if the convex hulls collide.
    """
    hull1 = _helpers.simple_convex_hull(nodes1)
    hull2 = _helpers.simple_convex_hull(nodes2)
    _, size1 = hull1.shape
    _, size2 = hull2.shape
    # Two-point "hulls" are line segments; use the cheaper segment test.
    if size1 == 2 and size2 == 2:
        return line_line_collide(hull1, hull2)
    return _helpers.polygon_collide(hull1, hull2)
def pairwise(values):
    """WITH values = [a, b, c, d, ...]
    RETURN [(a, b), (b, c), (c, d), ...]

    Yields nothing for inputs with fewer than two items.
    """
    it = iter(values)
    try:
        prev = next(it)
    except StopIteration:
        # BUG FIX: a bare next() on an empty input would let StopIteration
        # escape the generator, which raises RuntimeError under PEP 479.
        return
    for current in it:
        yield (prev, current)
        prev = current
def start_semester_view(request):
    """Initiates a semester's worth of workshift, with the option to copy
    workshift types from the previous semester."""
    page_name = "Start Semester"
    year, season = utils.get_year_season()
    start_date, end_date = utils.get_semester_start_end(year, season)
    semester_form = SemesterForm(
        data=request.POST or None,
        initial={
            "year": year,
            "season": season,
            "start_date": start_date.strftime(date_formats[0]),
            "end_date": end_date.strftime(date_formats[0]),
        },
        prefix="semester",
    )
    # Offer to copy the non-primary pools from the most recent semester.
    pool_forms = []
    try:
        prev_semester = Semester.objects.latest("end_date")
    except Semester.DoesNotExist:
        pass
    else:
        pools = WorkshiftPool.objects.filter(semester=prev_semester, is_primary=False)
        for pool in pools:
            pool_forms.append(StartPoolForm(
                data=request.POST or None,
                initial={"title": pool.title, "hours": pool.hours},
                prefix="pool-{}".format(pool.pk),
            ))
    if semester_form.is_valid() and all(form.is_valid() for form in pool_forms):
        # And save this semester
        semester = semester_form.save()
        for pool_form in pool_forms:
            pool_form.save(semester=semester)
        return HttpResponseRedirect(wurl("workshift:manage", sem_url=semester.sem_url))
    return render_to_response(
        "start_semester.html",
        {"page_name": page_name, "semester_form": semester_form, "pool_forms": pool_forms},
        context_instance=RequestContext(request),
    )
def u(d):
    """convert string, string container or unicode

    :param d:
    :return:
    """
    if six.PY2:
        # Only Python 2 needs explicit decoding; recurse into containers.
        if isinstance(d, six.binary_type):
            return d.decode("utf8", "ignore")
        if isinstance(d, list):
            return [u(item) for item in d]
        if isinstance(d, tuple):
            return tuple(u(item) for item in d)
        if isinstance(d, dict):
            return dict((u(k), u(v)) for k, v in six.iteritems(d))
    return d
def component_type(cls_or_slf, node):
    "Return the type.group.label dotted information"
    if node is None:
        return ''
    type_name = type(node).__name__
    return cls_or_slf.type_formatter.format(type=str(type_name))
def fill_into_dict(self, items, dest):
    """Take an iterable of `items` and group it into the given `dest` dict,
    using the :attr:`key` function.

    The `dest` dict must either already contain the keys which are
    generated by the :attr:`key` function for the items in `items`, or must
    default them suitably. The values of the affected keys must be
    sequences or objects with an :meth:`append` method which does what you
    want it to do.
    """
    for entry in items:
        dest[self.key(entry)].append(entry)
def set_container_setting(name, container, settings):
    '''
    Set the value of the setting for an IIS container.

    .. versionadded:: 2016.11.0

    Args:
        name (str): The name of the IIS container.
        container (str): The type of IIS container. The container types are:
            AppPools, Sites, SslBindings
        settings (dict): A dictionary of the setting names and their values.

    Returns:
        bool: True if successful, otherwise False

    CLI Example:

    .. code-block:: bash

        salt '*' win_iis.set_container_setting name='MyTestPool' container='AppPools'
            settings="{'managedPipeLineMode': 'Integrated'}"
    '''
    identityType_map2string = {'0': 'LocalSystem', '1': 'LocalService',
                               '2': 'NetworkService', '3': 'SpecificUser',
                               '4': 'ApplicationPoolIdentity'}
    identityType_map2numeric = {'LocalSystem': '0', 'LocalService': '1',
                                'NetworkService': '2', 'SpecificUser': '3',
                                'ApplicationPoolIdentity': '4'}
    if not settings:
        log.warning('No settings provided')
        return False
    container_path = r"IIS:\{0}\{1}".format(container, name)
    ps_cmd = []
    # Treat all values as strings for the purpose of comparing them to existing values.
    for setting in settings:
        settings[setting] = six.text_type(settings[setting])
    current_settings = get_container_setting(name=name, container=container,
                                             settings=settings.keys())
    if settings == current_settings:
        log.debug('Settings already contain the provided values.')
        return True
    for setting in settings:
        # If the value is numeric, don't treat it as a string in PowerShell.
        try:
            complex(settings[setting])
            value = settings[setting]
        except ValueError:
            value = "'{0}'".format(settings[setting])
        # Map to numeric to support server 2008
        if setting == 'processModel.identityType' and settings[setting] in identityType_map2numeric.keys():
            value = identityType_map2numeric[settings[setting]]
        ps_cmd.extend(['Set-ItemProperty',
                       '-Path', "'{0}'".format(container_path),
                       '-Name', "'{0}'".format(setting),
                       '-Value', '{0};'.format(value)])
    cmd_ret = _srvmgr(ps_cmd)
    if cmd_ret['retcode'] != 0:
        msg = 'Unable to set settings for {0}: {1}'.format(container, name)
        raise CommandExecutionError(msg)
    # Get the fields post-change so that we can verify that all values
    # were modified successfully. Track the ones that weren't.
    new_settings = get_container_setting(name=name, container=container,
                                         settings=settings.keys())
    failed_settings = {}
    for setting in settings:
        # map identity type from numeric to string for comparing
        if setting == 'processModel.identityType' and settings[setting] in identityType_map2string.keys():
            settings[setting] = identityType_map2string[settings[setting]]
        if six.text_type(settings[setting]) != six.text_type(new_settings[setting]):
            failed_settings[setting] = settings[setting]
    if failed_settings:
        log.error('Failed to change settings: %s', failed_settings)
        return False
    log.debug('Settings configured successfully: %s', settings.keys())
    return True
def query_disc(nside, vec, radius, inclusive=False, fact=4, nest=False):
    """Wrapper around healpy.query_disc to deal with old healpy implementation.

    nside : int
        The nside of the Healpix map.
    vec : float, sequence of 3 elements
        The coordinates of unit vector defining the disk center.
    radius : float
        The radius (in degrees) of the disc
    inclusive : bool, optional
        If False, return the exact set of pixels whose pixel centers lie
        within the disk; if True, return all pixels that overlap with the disk,
        and maybe a few more. Default: False
    fact : int, optional
        Only used when inclusive=True. The overlapping test will be done at
        the resolution fact*nside. For NESTED ordering, fact must be a power of 2,
        else it can be any positive integer. Default: 4.
    nest : bool, optional
        if True, assume NESTED pixel ordering, otherwise, RING pixel ordering
    """
    try:
        # New-style call (healpy 1.6.3)
        return hp.query_disc(nside, vec, np.radians(radius), inclusive, fact, nest)
    except Exception as exc:
        print(exc)
        # Old-style call (healpy 0.10.2)
        return hp.query_disc(nside, vec, np.radians(radius), nest, deg=False)
def format(format_string, cast=lambda x: x):
    """A pre-called helper to supply a modern string format (the kind with {} instead of %s), so that
    it can apply to each value in the column as it is rendered. This can be useful for string
    padding like leading zeroes, or rounding floating point numbers to a certain number of decimal
    places, etc.

    If given, the ``cast`` argument should be a mapping function that coerces the input to whatever
    type is required for the string formatting to work. Trying to push string data into a float
    format will raise an exception, for example, so the ``float`` type itself could be given as
    the ``cast`` function.

    Examples::

        # Perform some 0 padding
        item_number = columns.FloatColumn("Item No.", sources=['number'],
                                          processor=format("{:03d}"))

        # Force a string column value to coerce to float and round to 2 decimal places
        rating = columns.TextColumn("Rating", sources=['avg_rating'],
                                    processor=format("{:.2f}", cast=float))
    """
    def helper(instance, *args, **kwargs):
        # Prefer the explicit default_value, falling back to the raw instance.
        value = kwargs.get('default_value')
        if value is None:
            value = instance
        return format_string.format(cast(value), obj=instance)
    return helper
def transaction(self, _filter=None, default=None, yield_resource=False):
    """transaction(_filter=None, default=None)

    Claims a resource from the pool for use in a thread-safe,
    reentrant manner (as part of a with statement). Resources are
    created as needed when all members of the pool are claimed or
    the pool is empty.

    :param _filter: a filter that can be used to select a member
        of the pool
    :type _filter: callable
    :param default: a value that will be used instead of calling
        :meth:`create_resource` if a new resource needs to be created
    :param yield_resource: set to True to yield the Resource object
        itself
    :type yield_resource: boolean
    """
    resource = self.acquire(_filter=_filter, default=default)
    try:
        yield resource if yield_resource else resource.object
        # An errored resource must not go back into the pool.
        if resource.errored:
            self.delete_resource(resource)
    except BadResource:
        self.delete_resource(resource)
        raise
    finally:
        self.release(resource)
def rvs(self, *args, **kwargs):
    """Draw Random Variates.

    Parameters
    ----------
    size : int, optional (default=1)
    random_state_ : optional (default=None)
    """
    size = kwargs.pop('size', 1)
    # BUG FIX: this previously popped 'size' a second time, which always
    # returned None (the first pop already removed the key), so any
    # supplied random state was silently ignored. Accept both the
    # documented 'random_state_' spelling and plain 'random_state'.
    random_state = kwargs.pop('random_state_', kwargs.pop('random_state', None))
    return self._kde.sample(n_samples=size, random_state=random_state)
def prefetch(self, file_size=None):
    """Pre-fetch the remaining contents of this file in anticipation of future
    `.read` calls. If reading the entire file, pre-fetching can
    dramatically improve the download speed by avoiding roundtrip latency.
    The file's contents are incrementally buffered in a background thread.

    The prefetched data is stored in a buffer until read via the `.read`
    method. Once data has been read, it's removed from the buffer. The
    data may be read in a random order (using `.seek`); chunks of the
    buffer that haven't been read will continue to be buffered.

    :param int file_size:
        When this is ``None`` (the default), this method calls `stat` to
        determine the remote file size. In some situations, doing so can
        cause exceptions or hangs (see `#562
        <https://github.com/paramiko/paramiko/pull/562>`_); as a
        workaround, one may call `stat` explicitly and pass its value in
        via this parameter.

    .. versionadded:: 1.5.1
    .. versionchanged:: 1.16.0
        The ``file_size`` parameter was added (with no default value).
    .. versionchanged:: 1.16.1
        The ``file_size`` parameter was made optional for backwards
        compatibility.
    """
    if file_size is None:
        file_size = self.stat().st_size
    # Queue up async reads for the rest of the file, capped per request.
    chunks = []
    offset = self._realpos
    while offset < file_size:
        length = min(self.MAX_REQUEST_SIZE, file_size - offset)
        chunks.append((offset, length))
        offset += length
    if chunks:
        self._start_prefetch(chunks)
def is_birthday(self, dt=None):
    """Check if its the birthday.

    Compares the date/month values of the two dates.

    :rtype: bool
    """
    if dt is None:
        dt = Date.today()
    # Normalize the comparison date through our own class.
    other = self.__class__(dt.year, dt.month, dt.day)
    return (self.month, self.day) == (other.month, other.day)
def is_python_binding_installed_on_pip(self):
    """Check if the Python binding has already installed."""
    pip_version = self._get_pip_version()
    Log.debug('Pip version: {0}'.format(pip_version))
    pip_major_version = int(pip_version.split('.')[0])
    installed = False
    if pip_major_version >= 9:
        # --format is from pip v9.0.0
        # https://pip.pypa.io/en/stable/news/
        for package in self._get_pip_list_json_obj():
            Log.debug('pip list: {0}'.format(package))
            if package['name'] in ('rpm-python', 'rpm'):
                installed = True
                Log.debug('Package installed: {0}, {1}'.format(package['name'], package['version']))
                break
    else:
        # Implementation for pip old version.
        # It will be removed in the future.
        for line in self._get_pip_list_lines():
            if re.match('^rpm(-python)? ', line):
                installed = True
                Log.debug('Package installed.')
                break
    return installed
def _create_proxy(proxy_setting):
    """Create a Network proxy for the given proxy settings."""
    proxy = QNetworkProxy()
    scheme = proxy_setting['scheme']
    host = proxy_setting['host']
    port = proxy_setting['port']
    username = proxy_setting['username']
    password = proxy_setting['password']
    proxy_scheme_host = '{0}://{1}'.format(scheme, host)
    proxy.setType(QNetworkProxy.HttpProxy)
    # NOTE(review): proxy_scheme_host is always a non-empty string, so this
    # check can never be False; kept for parity with the original flow.
    if proxy_scheme_host:
        # proxy.setHostName(proxy_scheme_host)  # does not work with scheme
        proxy.setHostName(host)
    if port:
        proxy.setPort(port)
    if username:
        proxy.setUser(username)
    if password:
        proxy.setPassword(password)
    return proxy
def _generate_paragraphs(package, subpackages):
    """Generate the paragraphs of the API documentation."""
    # Emit one markdown section per subpackage.
    for subpackage in _iter_subpackages(package, subpackages):
        subpackage_name = subpackage.__name__
        yield "## {}".format(subpackage_name)
        # Subpackage documentation.
        yield _doc(_import_module(subpackage_name))
        # List of top-level functions in the subpackage.
        for func in _iter_functions(subpackage):
            yield '##### ' + _doc_function(subpackage, func)
        # All public classes, each with its methods and properties.
        for klass in _iter_classes(subpackage):
            yield "### {}".format(_full_name(subpackage, klass))
            yield _doc(klass)
            yield "#### Methods"
            for method in _iter_methods(klass, package):
                yield '##### ' + _doc_method(klass, method)
            yield "#### Properties"
            for prop in _iter_properties(klass, package):
                yield '##### ' + _doc_property(klass, prop)
def save_image(self, img, filename=None, **kwargs):
    """Save the image to the given *filename* in ninjotiff_ format.

    .. _ninjotiff: http://www.ssec.wisc.edu/~davidh/polar2grid/misc/NinJo_Satellite_Import_Formats.html
    """
    # Derive the output name from the image attributes when not supplied.
    target = filename or self.get_filename(**img.data.attrs)
    nt.save(img, target, **kwargs)
def count_nonzero(data, mapper=None, blen=None, storage=None, create='array', **kwargs):
    """Count the number of non-zero elements."""
    # Delegate to the generic blockwise reducer, counting per block with
    # numpy and summing the per-block counts.
    return reduce_axis(data,
                       reducer=np.count_nonzero,
                       block_reducer=np.add,
                       mapper=mapper,
                       blen=blen,
                       storage=storage,
                       create=create,
                       **kwargs)
def save_models(self, models_file):
    """Saves model parameters at each iteration of the optimization

    :param models_file: name of the file or a file buffer, in which the results are saved.
    """
    if self.model_parameters_iterations is None:
        raise ValueError("No iterations have been carried out yet and hence no iterations of the BO can be saved")
    n_iters = self.model_parameters_iterations.shape[0]
    # Prepend a 1-based iteration index column to the parameter history.
    iterations = np.array(range(1, n_iters + 1))[:, None]
    results = np.hstack((iterations, self.model_parameters_iterations))
    header = ['Iteration'] + self.model.get_model_parameters_names()
    self._write_csv(models_file, [header] + results.tolist())
def gen_search_gzh_url(keyword, page=1):
    """Build the search URL for official accounts (公众号).

    Parameters
    ----------
    keyword : str or unicode
        search text (搜索文字)
    page : int, optional
        page number, the default is 1

    Returns
    -------
    str
        search_gzh_url
    """
    assert isinstance(page, int) and page > 0
    # Preserve parameter order in the query string.
    query = OrderedDict()
    query['type'] = _search_type_gzh
    query['page'] = page
    query['ie'] = 'utf8'
    query['query'] = keyword
    return 'http://weixin.sogou.com/weixin?{}'.format(urlencode(query))
def _set_cached_solve(self, solver_dict):
    """Store a solve to memcached.

    If there is NOT a resolve timestamp:
        - store the solve to a non-timestamped entry.

    If there IS a resolve timestamp (let us call this T):
        - if NO newer package in the solve has been released since T,
          then store the solve to a non-timestamped entry;
        - else store the solve to a timestamped entry.
    """
    # Don't cache failed solves.
    if self.status_ != ResolverStatus.solved:
        return
    if not (self.caching and self.memcached_servers):
        return
    # Most recent release times get stored with solve result in the cache.
    releases_since_solve = False
    release_times_dict = {}
    variant_states_dict = {}
    for variant in self.resolved_packages_:
        release_time = get_last_release_time(variant.name, self.package_paths)
        # Don't cache if a release time isn't known.
        if release_time == 0:
            self._print("Did not send memcache key: a repository could "
                        "not provide a most recent release time for %r",
                        variant.name)
            return
        if self.timestamp and self.timestamp < release_time:
            releases_since_solve = True
        release_times_dict[variant.name] = release_time
        repo = variant.resource._repository
        variant_states_dict[variant.name] = repo.get_variant_state_handle(variant.resource)
    timestamped = (self.timestamp and releases_since_solve)
    cache_key = self._memcache_key(timestamped=timestamped)
    payload = (solver_dict, release_times_dict, variant_states_dict)
    with self._memcached_client() as client:
        client.set(cache_key, payload)
    self._print("Sent memcache key: %r", cache_key)
def dlogpdf_dtheta(self, f, y, Y_metadata=None):
    """Gradient of the log-pdf with respect to the likelihood parameters.

    NOTE(review): original docstring was "TODO: Doc strings"; semantics
    inferred from the delegated calls -- confirm against the class docs.
    """
    if self.size <= 0:
        # There are no parameters so return an empty array for derivatives.
        return np.zeros((0, f.shape[0], f.shape[1]))
    if self.not_block_really:
        raise NotImplementedError("Need to make a decorator for this!")
    if isinstance(self.gp_link, link_functions.Identity):
        return self.dlogpdf_link_dtheta(f, y, Y_metadata=Y_metadata)
    inv_link_f = self.gp_link.transf(f)
    return self.dlogpdf_link_dtheta(inv_link_f, y, Y_metadata=Y_metadata)
def connect_async(self, host, port=1883, keepalive=60, bind_address=""):
    """Connect to a remote broker asynchronously. This is a non-blocking
    connect call that can be used with loop_start() to provide very quick
    start.

    host is the hostname or IP address of the remote broker.
    port is the network port of the server host to connect to. Defaults to
    1883. Note that the default port for MQTT over SSL/TLS is 8883 so if you
    are using tls_set() the port may need providing.
    keepalive: Maximum period in seconds between communications with the
    broker. If no other messages are being exchanged, this controls the
    rate at which the client will send ping messages to the broker.
    """
    # Validate arguments up front before touching any state.
    if host is None or len(host) == 0:
        raise ValueError('Invalid host.')
    if port <= 0:
        raise ValueError('Invalid port number.')
    if keepalive < 0:
        raise ValueError('Keepalive must be >=0.')
    if bind_address != "" and bind_address is not None:
        py = sys.version_info
        if (py[0] == 2 and py[1] < 7) or (py[0] == 3 and py[1] < 2):
            raise ValueError('bind_address requires Python 2.7 or 3.2.')
    self._host = host
    self._port = port
    self._keepalive = keepalive
    self._bind_address = bind_address
    # Mark the client as pending-connect under the state lock.
    self._state_mutex.acquire()
    self._state = mqtt_cs_connect_async
    self._state_mutex.release()
def _summarize_result(self, root_action, leaf_eot):
    """Return a dict with useful information that summarizes this action."""
    parent = root_action.parent
    root_board = parent.board
    action_detail = root_action.position_pair
    score = self._relative_score(root_action, leaf_eot, parent.player, parent.opponent)
    # Mana drain info: count all leaves and how many are mana drains.
    total_leaves = 0
    mana_drain_leaves = 0
    for leaf in root_action.leaves():
        total_leaves += 1
        if leaf.is_mana_drain:
            mana_drain_leaves += 1
    return base.Summary(root_board, action_detail, score, mana_drain_leaves, total_leaves)
def version(self, path, postmap=None, **params):
    """Return the taskforce version.

    Supports standard options.
    """
    query = httpd.merge_query(path, postmap)
    ans = {
        'taskforce': taskforce_version,
        'python': '.'.join(str(x) for x in sys.version_info[:3]),
    }
    ans['platform'] = {'system': platform.system()}
    # Add in some extra details if this is a control path.
    # These might give away too many details on a public path.
    if self._httpd.allow_control:
        ans['platform']['platform'] = platform.platform()
        ans['platform']['release'] = platform.release()
    return self._format(ans, query)
def _check_generic_pos ( self , * tokens ) :
"""Check if the different tokens were logged in one record , any level .""" | for record in self . records :
if all ( token in record . message for token in tokens ) :
return
# didn ' t exit , all tokens are not present in the same record
msgs = [ "Tokens {} not found, all was logged is..." . format ( tokens ) ]
for record in self . records :
msgs . append ( " {:9s} {!r}" . format ( record . levelname , record . message ) )
self . test_instance . fail ( "\n" . join ( msgs ) ) |
def hash_from_algo(algo):
    """Return a :mod:`hashlib` hash given the :xep:`300` `algo`.

    :param algo: The algorithm identifier as defined in :xep:`300`.
    :type algo: :class:`str`
    :raises NotImplementedError: if the hash algorithm is not supported by
        :mod:`hashlib`.
    :raises ValueError: if the hash algorithm MUST NOT be supported.
    :return: A hash object from :mod:`hashlib` or compatible.
    """
    try:
        enabled, impl = _HASH_ALGO_MAP[algo]
    except KeyError:
        raise NotImplementedError(
            "hash algorithm {!r} unknown".format(algo)
        ) from None
    if not enabled:
        # The XEP explicitly forbids this algorithm for XMPP use.
        raise ValueError(
            "support of {} in XMPP is forbidden".format(algo)
        )
    fun_name, fun_args, fun_kwargs = impl
    try:
        factory = getattr(hashlib, fun_name)
    except AttributeError as exc:
        raise NotImplementedError(
            "{} not supported by hashlib".format(fun_name)
        ) from exc
    return factory(*fun_args, **fun_kwargs)
def traverse ( data , key , default = None , delimiter = DEFAULT_TARGET_DELIM ) :
'''Traverse a dict or list using a colon - delimited ( or otherwise delimited ,
using the ` ` delimiter ` ` param ) target string . The target ` ` foo : bar : 0 ` ` will
return ` ` data [ ' foo ' ] [ ' bar ' ] [ 0 ] ` ` if this value exists , and will otherwise
return the value given in the ` ` default ` ` argument .
The function will automatically determine the type of each target segment :
for ` ` foo : bar : 0 ` ` with data like ` ` { ' foo ' : { ' bar ' : [ ' baz ' ] } } ` ` the ` ` 0 ` `
indexes the list , while with data like ` ` { ' foo ' : { ' bar ' : { ' 0 ' : ' baz ' } } } ` `
it is used as the dict key ` ` ' 0 ' ` ` .
CLI Example :
. . code - block : : bash
salt ' * ' napalm _ formula . traverse " { ' foo ' : { ' bar ' : { ' baz ' : True } } } " foo : bar : baz''' | return _traverse_dict_and_list ( data , key , default = default , delimiter = delimiter ) |
def gen3d_conformer ( self ) :
"""A combined method to first generate 3D structures from 0D or 2D
structures and then find the minimum energy conformer :
1 . Use OBBuilder to create a 3D structure using rules and ring templates
2 . Do 250 steps of a steepest descent geometry optimization with the
MMFF94 forcefield
3 . Do 200 iterations of a Weighted Rotor conformational search
( optimizing each conformer with 25 steps of a steepest descent )
4 . Do 250 steps of a conjugate gradient geometry optimization .
Warning from openbabel docs :
For many applications where 100s if not 1000s of molecules need to be
processed , gen3d is rather SLOW . Sometimes this function can cause a
segmentation fault .
A future version of Open Babel will provide options for slow / medium / fast
3D structure generation which will involve different compromises
between speed and finding the global energy minimum .""" | gen3d = ob . OBOp . FindType ( "Gen3D" )
# NOTE(review): FindType returns a null result if the "Gen3D" op is not
# registered in this Open Babel build, which would make Do() fail -- confirm
# the op is always available. Do() mutates self._obmol in place.
gen3d . Do ( self . _obmol ) |
def label_present ( name , value , node = None , apiserver_url = None ) :
'''. . versionadded : : 2016.3.0
Set a label on the current node ( create it if missing , update it if its
value differs , leave it alone if already correct ) .
CLI Example :
. . code - block : : bash
salt ' * ' k8s . label _ present hw / disktype ssd
salt ' * ' k8s . label _ present hw / disktype ssd kube - node . cluster . local http : / / kube - master . cluster . local''' | ret = { 'name' : name , 'result' : True , 'comment' : '' , 'changes' : { } }
# Get salt minion ID
node = _guess_node_id ( node )
# Try to get kubernetes master
apiserver_url = _guess_apiserver ( apiserver_url )
if apiserver_url is None :
# NOTE(review): returns a bare False here instead of the usual state dict
# -- callers expecting `ret` must handle this; confirm intended.
return False
# Get all labels
labels = _get_labels ( node , apiserver_url )
if name not in labels : # This is a new label
ret [ 'changes' ] = { name : value }
# Labels are stored as strings; non-string values are coerced.
labels [ name ] = str ( value )
res = _set_labels ( node , apiserver_url , labels )
if res . get ( 'status' ) == 409 : # there is an update during operation , need to retry
log . debug ( "Got 409, will try later" )
ret [ 'changes' ] = { }
ret [ 'comment' ] = "Could not create label {0}, please retry" . format ( name )
else :
ret [ 'comment' ] = "Label {0} created" . format ( name )
elif labels . get ( name ) != str ( value ) : # This is an old label and we are going to edit it
ret [ 'changes' ] = { name : str ( value ) }
labels [ name ] = str ( value )
res = _set_labels ( node , apiserver_url , labels )
if res . get ( 'status' ) == 409 : # there is an update during operation , need to retry
log . debug ( "Got 409, will try later" )
ret [ 'changes' ] = { }
ret [ 'comment' ] = "Could not update label {0}, please retry" . format ( name )
else :
ret [ 'comment' ] = "Label {0} updated" . format ( name )
else : # This is an old label and it already has the wanted value
ret [ 'comment' ] = "Label {0} already set" . format ( name )
return ret |
def get_path(self):
    """Get the path to the focused statistics.

    Each step of the returned path is the hash of a statistics object,
    ordered from the outermost ancestor down to the focused node.
    """
    _, node = self.get_focus()
    steps = deque()
    # Walk up toward the root, prepending each node's stats hash.
    while not node.is_root():
        steps.appendleft(hash(node.get_value()))
        node = node.get_parent()
    return steps
def get_filenames(dirname):
    """Return all model output filenames inside a model output directory,
    sorted by iteration number.

    Parameters
    ----------
    dirname : str
        A path to a directory.

    Returns
    -------
    filenames : list[str]
        Paths to all ``.pkl`` output files inside `dirname`, sorted in
        order of increasing iteration number.
    """
    pattern = '{}/*.pkl'.format(dirname)
    # _f_to_i extracts the iteration number used as the sort key.
    return sorted(glob.glob(pattern), key=_f_to_i)
def _execute ( self , sql , args ) :
"""Execute a SQL statement on a pooled connection (Tornado coroutine).
:param sql: the SQL statement; its first keyword decides the behaviour
    (select -> fetchall, insert -> commit and return the new primary key,
    update/delete -> commit)
:param args: parameters bound to the statement
:return: result rows for select, the inserted primary key for insert,
    otherwise None""" | sql = sql . lower ( ) . strip ( )
# NOTE(review): lower() is applied to the whole statement, so quoted string
# literals embedded in `sql` are lowercased too -- confirm callers only pass
# literals via `args`.
args = args or ( )
# First six characters identify the statement kind (select/insert/...).
tmp = sql [ : 6 ]
with ( yield self . _pool . Connection ( ) ) as conn :
try :
with conn . cursor ( ) as cursor :
yield cursor . execute ( sql , args = args )
if tmp == 'select' :
datas = cursor . fetchall ( )
return datas
except Exception as e :
err = traceback . format_exc ( )
print ( err )
# Only write statements need a rollback on failure.
if tmp in [ 'insert' , 'update' , 'delete' ] :
yield conn . rollback ( )
else :
if tmp == 'insert' :
# Grab the auto-increment id before committing.
insertId = conn . insert_id ( )
yield conn . commit ( )
# Return the inserted primary key id.
return insertId
elif tmp in [ 'update' , 'delete' ] :
yield conn . commit ( ) |
def tempoAdjust3(self, tempoFactor):
    """Adjust tempo by aggregating active basal cell votes for pre vs. post
    (like tempoAdjust2) -> if vote total == 0 (tied), reuse the last vote.

    FIX: the original mixed Python 2 ``print 'x'`` statements with Python 3
    ``print(...)`` calls; the statement form is a SyntaxError on Python 3,
    so all output now uses the print() function.

    :param tempoFactor: scaling signal to MC clock from last sequence item
    :return: adjusted scaling signal
    """
    # Basally-predicted cells without apical support vote "late" (negative);
    # cells in the apical intersection vote "early" (positive).
    late_votes = (len(self.adtm.getNextBasalPredictedCells())
                  - len(self.apicalIntersect)) * -1
    early_votes = len(self.apicalIntersect)
    votes = late_votes + early_votes
    print('vote tally', votes)
    if votes > 0:
        tempoFactor = tempoFactor * 0.5
        self.prevVote = 0.5
        print('speed up')
    elif votes < 0:
        tempoFactor = tempoFactor * 2
        self.prevVote = 2
        print('slow down')
    else:
        if self.prevVote == 0:
            # No previous vote to fall back on: choose a direction at random.
            print('pick randomly')
            if random.random() > 0.5:
                tempoFactor = tempoFactor * 0.5
                self.prevVote = 0.5
                print('random pick: speed up')
            else:
                tempoFactor = tempoFactor * 2
                self.prevVote = 2
                print('random pick: slow down')
        else:
            print('tied: re-choose last choice')
            tempoFactor = tempoFactor * self.prevVote
    return tempoFactor
def make_redirect_url(self, path_info, query_args=None, domain_part=None):
    """Creates a redirect URL.

    :internal:
    """
    suffix = '?' + self.encode_query_args(query_args) if query_args else ''
    host = self.get_host(domain_part)
    quoted_path = url_quote(path_info.lstrip('/'), self.map.charset,
                            safe='/:|+')
    full_path = posixpath.join(self.script_name[:-1].lstrip('/'), quoted_path)
    return str('%s://%s/%s%s' % (self.url_scheme, host, full_path, suffix))
def get_by_id(self, style_id, style_type):
    """Return the style of *style_type* matching *style_id*.

    Returns the default for *style_type* if *style_id* is not found or is
    |None|, or if the style having *style_id* is not of *style_type*.
    """
    if style_id is not None:
        return self._get_by_id(style_id, style_type)
    return self.default(style_type)
def create_push(self, push, repository_id, project=None):
    """CreatePush.

    Push changes to the repository.
    :param :class:`<GitPush> <azure.devops.v5_0.git.models.GitPush>` push:
    :param str repository_id: The name or ID of the repository.
    :param str project: Project ID or project name
    :rtype: :class:`<GitPush> <azure.devops.v5_0.git.models.GitPush>`
    """
    route_values = {}
    # Serialize only the route parameters that were actually supplied.
    for route_key, param_name, value in (
            ('project', 'project', project),
            ('repositoryId', 'repository_id', repository_id)):
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, 'str')
    body = self._serialize.body(push, 'GitPush')
    response = self._send(http_method='POST',
                          location_id='ea98d07b-3c87-4971-8ede-a613694ffb55',
                          version='5.0',
                          route_values=route_values,
                          content=body)
    return self._deserialize('GitPush', response)
def head ( self , file_path ) :
"""Only read the first packets that come , try to max out at 1024kb
:return: up to 1024b of the first block of the file""" | processor = lambda path , node , tail_only = True , append = False : self . _handle_head ( path , node )
# The tail_only/append keyword defaults are required by the processor
# callback signature but ignored by _handle_head.
# Find items and go; return the first truthy result (implicitly None if
# nothing matched).
for item in self . _client . _find_items ( [ file_path ] , processor , include_toplevel = True , include_children = False , recurse = False ) :
if item :
return item |
def get_quoted_foreign_columns(self, platform):
    """Returns the quoted representation of the referenced table column names
    the foreign key constraint is associated with.

    But only if they were defined with one or the referenced table column name
    is a keyword reserved by the platform.
    Otherwise the plain unquoted value as inserted is returned.

    :param platform: The platform to use for quotation.
    :type platform: Platform

    :rtype: list
    """
    return [
        column.get_quoted_name(platform)
        for column in self._foreign_column_names.values()
    ]
def search_user_for_facet(self, facet, **kwargs):  # noqa: E501
    """Lists the values of a specific facet over the customer's users  # noqa: E501

    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.search_user_for_facet(facet, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str facet: (required)
    :param FacetSearchRequestContainer body:
    :return: ResponseContainerFacetResponse
             If the method is called asynchronously,
             returns the request thread.
    """
    # Always ask the lower layer for just the payload, not the full
    # (data, status, headers) triple.
    kwargs['_return_http_data_only'] = True
    if not kwargs.get('async_req'):
        data = self.search_user_for_facet_with_http_info(facet, **kwargs)  # noqa: E501
        return data
    return self.search_user_for_facet_with_http_info(facet, **kwargs)  # noqa: E501
def _refine_stmt ( self , stmt : Statement , sctx : SchemaContext ) -> None :
"""Handle a **refine** statement: locate its target node and either prune
it (when disabled by if-feature) or apply the refine's substatements.""" | target = self . get_schema_descendant ( sctx . schema_data . sni2route ( stmt . argument , sctx ) )
# If the refine's if-feature evaluation fails, the target node is removed
# from the schema tree entirely; otherwise the substatements are applied.
if not sctx . schema_data . if_features ( stmt , sctx . text_mid ) :
target . parent . children . remove ( target )
else :
target . _handle_substatements ( stmt , sctx ) |
def selected_hazard_category ( self ) :
"""Obtain the hazard category selected by user .
:returns: Metadata of the selected hazard category, or None when nothing
    is selected.
:rtype: dict, None""" | item = self . lstHazardCategories . currentItem ( )
try :
# currentItem() returns None when there is no selection, which makes
# item.data raise AttributeError. NameError is presumably caught in case
# `definition` is unavailable in some import configurations -- TODO confirm.
return definition ( item . data ( QtCore . Qt . UserRole ) )
except ( AttributeError , NameError ) :
return None |
def watch(self, username, watch=None):
    """Watch a user.

    FIXES: the original used a mutable dict as the default argument (shared
    across calls) and compared the grant type with ``is not`` (string
    identity, implementation-defined and a SyntaxWarning on CPython >= 3.8);
    both are replaced here. Passing a partial ``watch`` mapping now also
    works: omitted categories default to True, which is backward-compatible
    with the old all-True default.

    :param username: The username you want to watch
    :param watch: optional mapping of watch categories to booleans
    :raises DeviantartError: when not authenticated via the Authorization
        Code grant type
    :return: the API's ``success`` flag
    """
    defaults = {
        "friend": True, "deviations": True, "journals": True,
        "forum_threads": True, "critiques": True, "scraps": True,
        "activity": True, "collections": True,
    }
    if watch:
        defaults.update(watch)
    watch = defaults
    if self.standard_grant_type != "authorization_code":
        raise DeviantartError(
            "Authentication through Authorization Code (Grant Type) is "
            "required in order to connect to this endpoint.")
    response = self._req(
        '/user/friends/watch/{}'.format(username),
        post_data={
            "watch[friend]": watch['friend'],
            "watch[deviations]": watch['deviations'],
            "watch[journals]": watch['journals'],
            "watch[forum_threads]": watch['forum_threads'],
            "watch[critiques]": watch['critiques'],
            "watch[scraps]": watch['scraps'],
            "watch[activity]": watch['activity'],
            "watch[collections]": watch['collections'],
        })
    return response['success']
def visit_ImportFrom(self, node):
    """Visit a from-import node and record its imported names."""
    source_line = self._code_lines[node.lineno - 1]
    # Recover the module name from the raw source text of the import line.
    module_name = source_line.split("from")[1].split("import")[0].strip()
    for alias in node.names:
        if alias.asname:
            key = alias.asname + "::" + alias.name
        else:
            key = alias.name
        self.imported_names[key] = module_name
def inject(fun: Callable) -> Callable:
    """A decorator for injection dependencies into functions/methods, based
    on their type annotations.

    .. code-block:: python

        class SomeClass:
            @inject
            def __init__(self, my_dep: DepType) -> None:
                self.my_dep = my_dep

    .. important::
        On the opposite to :class:`~haps.Inject`, dependency is injected
        at the moment of method invocation. In case of decorating `__init__`,
        dependency is injected when `SomeClass` instance is created.

    :param fun: callable with annotated parameters
    :return: decorated callable
    """
    signature = inspect.signature(fun)
    # Map parameter name -> annotated type, skipping `self`.
    injectables: Dict[str, Any] = {
        name: parameter.annotation
        for name, parameter in signature.parameters.items()
        if name != 'self'
    }

    @wraps(fun)
    def _inner(*args, **kwargs):
        container = Container()
        # Only fill in arguments the caller did not supply explicitly.
        for arg_name, arg_type in injectables.items():
            if arg_name not in kwargs:
                kwargs[arg_name] = container.get_object(arg_type)
        return fun(*args, **kwargs)

    return _inner
def get_json_response_object(self, datatable):
    """Returns the JSON-compatible dictionary that will be serialized for an
    AJAX response.

    The value names are in the form "s~" for strings, "i~" for integers, and
    "a~" for arrays, if you're unfamiliar with the old C-style jargon used in
    dataTables.js. "aa~" means "array of arrays". In some instances, the
    author uses "ao~" for "array of objects", an object being a javascript
    dictionary.
    """
    # Ensure the object list is calculated. Calling get_records() would do
    # this implicitly, but we also need 'total_initial_record_count' and
    # 'unpaged_record_count' at the same time.
    datatable.populate_records()
    draw = getattr(self.request, self.request.method).get('draw', None)
    if draw is not None:
        draw = escape_uri_path(draw)
    rows = []
    for record in datatable.get_records():
        # pop() deliberately mutates the record, mirroring the original
        # dict(record, **{...: record.pop(...)}) evaluation order.
        row_id = record.pop('pk')
        extra = record.pop('_extra_data')
        rows.append(dict(record, DT_RowId=row_id, DT_RowData=extra))
    return {
        'draw': draw,
        'recordsFiltered': datatable.unpaged_record_count,
        'recordsTotal': datatable.total_initial_record_count,
        'data': rows,
    }
def visit_BoolOp(self, node):
    """Return type may come from any boolop operand."""
    combined = []
    for operand in node.values:
        combined += self.visit(operand)
    return combined
def unlink ( self ) :
"""Overrides orm unlink method: before deleting a folio line, remove the
linked sale order line, its folio.room.line record, and mark the room as
available again.
@param self: The object pointer
@return: True/False.""" | sale_line_obj = self . env [ 'sale.order.line' ]
fr_obj = self . env [ 'folio.room.line' ]
for line in self :
if line . order_line_id :
sale_unlink_obj = ( sale_line_obj . browse ( [ line . order_line_id . id ] ) )
for rec in sale_unlink_obj :
# Rooms are matched by name against the sale line description.
room_obj = self . env [ 'hotel.room' ] . search ( [ ( 'name' , '=' , rec . name ) ] )
if room_obj . id :
folio_arg = [ ( 'folio_id' , '=' , line . folio_id . id ) , ( 'room_id' , '=' , room_obj . id ) ]
folio_room_line_myobj = fr_obj . search ( folio_arg )
if folio_room_line_myobj . id :
folio_room_line_myobj . unlink ( )
# Free the room once its folio line goes away.
room_obj . write ( { 'isroom' : True , 'status' : 'available' } )
sale_unlink_obj . unlink ( )
return super ( HotelFolioLine , self ) . unlink ( ) |
def get_servers(self):
    """Create the list of Server objects inside the Datacenter objects.

    Builds the internal list of VM objects (Pro or Smart) in ``self.vmlist``,
    fetching the IP list first when the cached one is empty.

    FIX: the original tested ``elem['HypervisorType'] is 4`` and
    ``json_obj['Success'] is True`` -- identity comparison with literals is
    implementation-defined (it only worked via CPython's small-int cache and
    bool singletons) and is a SyntaxWarning on CPython >= 3.8; value
    comparison is used for the int.

    :return: bool -- True when the API reports Success.
    """
    json_scheme = self.gen_def_json_scheme('GetServers')
    json_obj = self.call_method_post(method='GetServers',
                                     json_scheme=json_scheme)
    self.json_servers = json_obj
    # If this method is called I assume that I must re-read the data,
    # so I reinitialize the vmlist.
    self.vmlist = VMList()
    # Getting all instanced IPs in case the cached list is empty.
    if not self.iplist:
        self.get_ip()
    for elem in dict(json_obj)["Value"]:
        # HypervisorType 4 identifies a "Smart" VM; anything else is "Pro".
        is_smart = elem['HypervisorType'] == 4
        if is_smart:
            s = Smart(interface=self, sid=elem['ServerId'])
        else:
            s = Pro(interface=self, sid=elem['ServerId'])
        s.vm_name = elem['Name']
        s.cpu_qty = elem['CPUQuantity']
        s.ram_qty = elem['RAMQuantity']
        s.status = elem['ServerStatus']
        s.datacenter_id = elem['DatacenterId']
        s.wcf_baseurl = self.wcf_baseurl
        s.auth = self.auth
        s.hd_qty = elem['HDQuantity']
        s.hd_total_size = elem['HDTotalSize']
        if is_smart:
            # Smart VMs expose their IP only via the detail call.
            ssd = self.get_server_detail(elem['ServerId'])
            try:
                s.ip_addr = str(ssd['EasyCloudIPAddress']['Value'])
            except TypeError:
                s.ip_addr = 'Not retrieved.'
        else:
            # Pro VMs get the list of IP objects assigned to their server id.
            s.ip_addr = [ip for ip in self.iplist if ip.serverid == s.sid]
        self.vmlist.append(s)
    return json_obj['Success'] is True
def get_static ( root = None ) :
'''. . versionadded : : 2015.8.5
Return a list of all static services
root
Enable / disable / mask unit files in the specified root directory
CLI Example :
. . code - block : : bash
salt ' * ' service . get _ static''' | ret = set ( )
# Get static systemd units . Can ' t use - - state = static here because it ' s
# not present until systemd 216.
out = __salt__ [ 'cmd.run' ] ( _systemctl_cmd ( '--full --no-legend --no-pager list-unit-files' , root = root ) , python_shell = False , ignore_retcode = True )
for line in salt . utils . itertools . split ( out , '\n' ) :
try :
# Each line is "<unit>.<type> <state>"; skip malformed lines.
fullname , unit_state = line . strip ( ) . split ( None , 1 )
except ValueError :
continue
else :
if unit_state != 'static' :
continue
try :
unit_name , unit_type = fullname . rsplit ( '.' , 1 )
except ValueError :
continue
# Services are reported without their ".service" suffix; every other
# valid unit type keeps its full name.
if unit_type in VALID_UNIT_TYPES :
ret . add ( unit_name if unit_type == 'service' else fullname )
# sysvinit services cannot be static
return sorted ( ret ) |
def run ( self , host = None , port = None , debug = None , use_reloader = None , open_browser = False ) :
"""Starts a server to render the README.
:param host: bind address; defaults to config['HOST']
:param port: bind port; defaults to config['PORT'] (0 picks a free port)
:param debug: Flask debug mode; defaults to self.debug
:param use_reloader: reload on change; defaults to config['DEBUG_GRIP']
:param open_browser: when True, open a browser once the server is up
:raises AlreadyRunningError: if the server is already running""" | if host is None :
host = self . config [ 'HOST' ]
if port is None :
port = self . config [ 'PORT' ]
if debug is None :
debug = self . debug
if use_reloader is None :
use_reloader = self . config [ 'DEBUG_GRIP' ]
# Verify the server is not already running and start
with self . _run_mutex :
if self . _shutdown_event :
raise AlreadyRunningError ( )
self . _shutdown_event = threading . Event ( )
# Authentication message
if self . auth and not self . quiet :
if isinstance ( self . auth , tuple ) :
username , password = self . auth
auth_method = ( 'credentials: {0}' . format ( username ) if username else 'personal access token' )
else :
auth_method = type ( self . auth ) . __name__
print ( ' * Using' , auth_method , file = sys . stderr )
# Get random port manually when needed ahead of time
# NOTE(review): binding, reading the port, then closing the socket is a
# TOCTOU race -- another process could grab the port before the server
# binds it. Acceptable for local use; confirm if that assumption holds.
if port == 0 and open_browser :
sock = socket . socket ( socket . AF_INET , socket . SOCK_STREAM )
sock . bind ( ( 'localhost' , 0 ) )
port = sock . getsockname ( ) [ 1 ]
sock . close ( )
# Open browser
browser_thread = ( start_browser_when_ready ( host , port , self . _shutdown_event ) if open_browser else None )
# Run local server (blocks until the server stops)
super ( Grip , self ) . run ( host , port , debug = debug , use_reloader = use_reloader , threaded = True )
# Signal to the polling and browser threads that they should exit
if not self . quiet :
print ( ' * Shutting down...' )
self . _shutdown_event . set ( )
# Wait for browser thread to finish
if browser_thread :
browser_thread . join ( )
# Cleanup
self . _shutdown_event = None |
def getopt_default(self, option):
    """Default method to get an option: print its value to stdout, or a
    warning when the option is unknown."""
    if option in self.conf:
        print("%s: %s" % (option, self.conf[option]))
    else:
        print("Unrecognized option %r" % option)
def beacon ( config ) :
'''Scan the shell execve routines . This beacon will convert all login shells
. . code - block : : yaml
beacons :
sh : [ ]''' | ret = [ ]
pkey = 'sh.vt'
shells = _get_shells ( )
ps_out = __salt__ [ 'status.procs' ] ( )
track_pids = [ ]
# Collect PIDs whose command (minus a login-shell leading '-') is a shell.
for pid in ps_out :
if any ( ps_out [ pid ] . get ( 'cmd' , '' ) . lstrip ( '-' ) in shell for shell in shells ) :
track_pids . append ( pid )
if pkey not in __context__ :
__context__ [ pkey ] = { }
# Attach a strace terminal to any newly-seen shell PID; state persists in
# __context__ across beacon invocations.
for pid in track_pids :
if pid not in __context__ [ pkey ] :
cmd = [ 'strace' , '-f' , '-e' , 'execve' , '-p' , '{0}' . format ( pid ) ]
__context__ [ pkey ] [ pid ] = { }
__context__ [ pkey ] [ pid ] [ 'vt' ] = salt . utils . vt . Terminal ( cmd , log_stdout = True , log_stderr = True , stream_stdout = False , stream_stderr = False )
__context__ [ pkey ] [ pid ] [ 'user' ] = ps_out [ pid ] . get ( 'user' )
# Drain each tracked terminal; strace writes its trace to stderr.
for pid in list ( __context__ [ pkey ] ) :
out = ''
err = ''
while __context__ [ pkey ] [ pid ] [ 'vt' ] . has_unread_data :
tout , terr = __context__ [ pkey ] [ pid ] [ 'vt' ] . recv ( )
if not terr :
break
out += salt . utils . stringutils . to_unicode ( tout or '' )
err += terr
for line in err . split ( '\n' ) :
event = { 'args' : [ ] , 'tag' : pid }
if 'execve' in line :
# Quoted fields alternate: index 1 is the command, the other odd
# indices are its arguments.
comps = line . split ( 'execve' ) [ 1 ] . split ( '"' )
for ind , field in enumerate ( comps ) :
if ind == 1 :
event [ 'cmd' ] = field
continue
if ind % 2 != 0 :
event [ 'args' ] . append ( field )
event [ 'user' ] = __context__ [ pkey ] [ pid ] [ 'user' ]
ret . append ( event )
# Drop terminals whose traced shell has exited.
if not __context__ [ pkey ] [ pid ] [ 'vt' ] . isalive ( ) :
__context__ [ pkey ] [ pid ] [ 'vt' ] . close ( )
__context__ [ pkey ] . pop ( pid )
return ret |
def accumulate(self, buf):
    """Fold more bytes into the running 16-bit CRC stored on ``self.crc``."""
    crc = self.crc
    for byte in buf:
        tmp = byte ^ (crc & 0xFF)
        tmp = (tmp ^ (tmp << 4)) & 0xFF
        crc = (crc >> 8) ^ (tmp << 8) ^ (tmp << 3) ^ (tmp >> 4)
    self.crc = crc
def _backwards_aliases ( self ) :
"""In order to keep this backwards - compatible with previous versions ,
alias the old names to the new methods .""" | self . list_containers = self . list_container_names
self . get_all_containers = self . list
self . get_container = self . get
self . create_container = self . create
self . delete_container = self . delete
self . get_container_objects = self . list_container_objects
self . get_container_object_names = self . list_container_object_names
self . get_info = self . get_account_info |
def get_num_nodes ( properties = None , hadoop_conf_dir = None , offline = False ) :
"""Get the number of task trackers in the Hadoop cluster .
All arguments are passed through unchanged to :func:`get_task_trackers`;
the count is simply the length of its result.""" | return len ( get_task_trackers ( properties , hadoop_conf_dir , offline ) ) |
def _connect(self, id_mask):
    """Connect to all of the load cells serially.

    Scans ``/dev/ttyUSB*`` for LoadStar serial sensors whose ID response
    starts with `id_mask`, then opens a serial handle for each, sorted by
    the reported sensor ID.

    FIX: the probe loop used a bare ``except:``, which also swallowed
    KeyboardInterrupt/SystemExit; it now catches only ``Exception`` so a
    Ctrl-C during the scan is not silently ignored.

    :param id_mask: prefix the sensor's ID response must match
    :return: list of open ``serial.Serial`` handles, one per sensor
    """
    # Get all devices attached as USB serial.
    all_devices = glob.glob('/dev/ttyUSB*')
    # Identify which of the devices are LoadStar Serial Sensors.
    sensors = []
    for device in all_devices:
        try:
            ser = serial.Serial(port=device, timeout=0.5, exclusive=True)
            # NOTE(review): pyserial on Python 3 expects bytes for write()
            # and returns bytes from read(); the str 'ID\r' and the str-based
            # rstrip/comparison below assume Python 2 semantics -- confirm
            # the runtime before relying on this probe.
            ser.write('ID\r')
            ser.flush()
            time.sleep(0.05)
            resp = ser.read(13)
            ser.close()
            if len(resp) >= 10 and resp[:len(id_mask)] == id_mask:
                sensors.append((device, resp.rstrip('\r\n')))
        except Exception:
            # A device that errors during probing is simply not a sensor.
            continue
    sensors = sorted(sensors, key=lambda pair: pair[1])
    # Connect to each of the identified serial devices.
    serials = []
    for device, key in sensors:
        ser = serial.Serial(port=device, timeout=0.5)
        serials.append(ser)
        rospy.loginfo('Connected to load cell {} at {}'.format(key, device))
    return serials
def package_data(pkg, root_list):
    """Generic function to find package_data for `pkg` under each root in
    `root_list`.

    Returns a mapping {pkg: [paths relative to pkg]}.
    """
    found = []
    for root in root_list:
        top = os.path.join(pkg, root)
        for dirpath, _, filenames in os.walk(top):
            found.extend(
                os.path.relpath(os.path.join(dirpath, name), pkg)
                for name in filenames
            )
    return {pkg: found}
def deleteBy ( self , func ) :
'Delete rows for which func ( row ) is true . Returns number of deleted rows . The cursor is re-placed on the first surviving row at or after its old position .' | oldrows = copy ( self . rows )
oldidx = self . cursorRowIndex
ndeleted = 0
row = None
# row to re - place cursor after
# Scan forward from the old cursor position for the first row that will
# survive the deletion; the cursor follows that row.
while oldidx < len ( oldrows ) :
if not func ( oldrows [ oldidx ] ) :
row = self . rows [ oldidx ]
break
oldidx += 1
# Rebuild the row list, keeping only rows the predicate rejects.
self . rows . clear ( )
for r in Progress ( oldrows , 'deleting' ) :
if not func ( r ) :
self . rows . append ( r )
if r is row :
self . cursorRowIndex = len ( self . rows ) - 1
else :
ndeleted += 1
status ( 'deleted %s %s' % ( ndeleted , self . rowtype ) )
return ndeleted |
def dc_element ( self , parent , name , text ) :
"""Add DC element `name` containing `text` to `parent`, but only when the
Dublin Core namespace is configured; otherwise do nothing.""" | if self . dc_uri in self . namespaces :
# Prepend so the DC metadata element appears before other children.
dcel = SchemaNode ( self . namespaces [ self . dc_uri ] + ":" + name , text = text )
parent . children . insert ( 0 , dcel ) |
def append ( self , other , inplace = True , pad = None , gap = None , resize = True ) :
"""Connect another series onto the end of the current one .
Parameters
other : ` Series `
another series of the same type to connect to this one
inplace : ` bool ` , optional
perform operation in - place , modifying current series ,
otherwise copy data and return new series , default : ` True `
. . warning : :
` inplace ` append bypasses the reference check in
` numpy . ndarray . resize ` , so be careful to only use this
for arrays that haven ' t been sharing their memory !
pad : ` float ` , optional
value with which to pad discontiguous series ,
by default gaps will result in a ` ValueError ` .
gap : ` str ` , optional
action to perform if there ' s a gap between the other series
and this one . One of
- ` ` ' raise ' ` ` - raise a ` ValueError `
- ` ` ' ignore ' ` ` - remove gap and join data
- ` ` ' pad ' ` ` - pad gap with zeros
If ` ` pad ` ` is given and is not ` None ` , the default is ` ` ' pad ' ` ` ,
otherwise ` ` ' raise ' ` ` . If ` ` gap = ' pad ' ` ` is given , the default
for ` ` pad ` ` is ` ` 0 ` ` .
resize : ` bool ` , optional
resize this array to accommodate new data , otherwise shift the
old data to the left ( potentially falling off the start ) and
put the new data in at the end , default : ` True ` .
Returns
series : ` Series `
a new series containing joined data sets""" | if gap is None :
gap = 'raise' if pad is None else 'pad'
if pad is None and gap == 'pad' :
pad = 0.
# check metadata
self . is_compatible ( other )
# make copy if needed
if not inplace :
self = self . copy ( )
# fill gap ( is_contiguous == 1 means `other` starts exactly where
# this series ends ; anything else needs gap handling )
if self . is_contiguous ( other ) != 1 :
if gap == 'pad' :
ngap = floor ( ( other . xspan [ 0 ] - self . xspan [ 1 ] ) / self . dx . value + 0.5 )
if ngap < 1 :
raise ValueError ( "Cannot append {0} that starts before this one:\n" " {0} 1 span: {1}\n {0} 2 span: {2}" . format ( type ( self ) . __name__ , self . xspan , other . xspan ) )
gapshape = list ( self . shape )
gapshape [ 0 ] = int ( ngap )
# recursively append a pad block first , then fall through to
# append `other` itself
padding = ( numpy . ones ( gapshape ) * pad ) . astype ( self . dtype )
self . append ( padding , inplace = True , resize = resize )
elif gap == 'ignore' :
pass
elif self . xspan [ 0 ] < other . xspan [ 0 ] < self . xspan [ 1 ] :
raise ValueError ( "Cannot append overlapping {0}s:\n" " {0} 1 span: {1}\n {0} 2 span: {2}" . format ( type ( self ) . __name__ , self . xspan , other . xspan ) )
else :
raise ValueError ( "Cannot append discontiguous {0}\n" " {0} 1 span: {1}\n {0} 2 span: {2}" . format ( type ( self ) . __name__ , self . xspan , other . xspan ) )
# check empty other
if not other . size :
return self
# resize first ; otherwise shift existing data left to make room
if resize :
N = other . shape [ 0 ]
s = list ( self . shape )
s [ 0 ] = self . shape [ 0 ] + other . shape [ 0 ]
try :
self . resize ( s , refcheck = False )
except ValueError as e :
# shared-memory arrays cannot be resized in place ; fall back
# to a copy
if 'resize only works on single-segment arrays' in str ( e ) :
self = self . copy ( )
self . resize ( s )
else :
raise
elif other . shape [ 0 ] < self . shape [ 0 ] :
N = other . shape [ 0 ]
self . value [ : - N ] = self . value [ N : ]
else :
N = min ( self . shape [ 0 ] , other . shape [ 0 ] )
# if units are the same , can shortcut
# NOTE : why not use isinstance here ?
if type ( other ) == type ( self ) and other . unit == self . unit :
self . value [ - N : ] = other . value [ - N : ]
# otherwise if its just a numpy array
elif type ( other ) is type ( self . value ) or ( # noqa : E721
other . dtype . name . startswith ( 'uint' ) ) :
self . value [ - N : ] = other [ - N : ]
else :
self [ - N : ] = other [ - N : ]
# finally update the x - axis metadata ( explicit index array when
# present , otherwise just x0 / dx )
try :
self . _xindex
except AttributeError :
if not resize :
self . x0 = self . x0 . value + other . shape [ 0 ] * self . dx . value
else :
if resize :
try :
self . xindex . resize ( ( s [ 0 ] , ) , refcheck = False )
except ValueError as exc :
if 'cannot resize' in str ( exc ) :
self . xindex = self . xindex . copy ( )
self . xindex . resize ( ( s [ 0 ] , ) )
else :
raise
else :
self . xindex [ : - other . shape [ 0 ] ] = self . xindex [ other . shape [ 0 ] : ]
try :
self . xindex [ - other . shape [ 0 ] : ] = other . _xindex
except AttributeError :
# `other` has no explicit index : drop ours and regenerate
# from x0 / dx instead
del self . xindex
if not resize :
self . x0 = self . x0 + self . dx * other . shape [ 0 ]
else :
try :
self . dx = self . xindex [ 1 ] - self . xindex [ 0 ]
except IndexError :
pass
self . x0 = self . xindex [ 0 ]
return self |
def setup_migrate ( app ) :
"""Setup flask-migrate, pointing it at the package-local ``migrations``
directory next to this module.""" | directory = path . join ( path . dirname ( __file__ ) , 'migrations' )
migrate . init_app ( app , db , directory = directory ) |
async def restore_networking_configuration ( self ) :
"""Restore machine ' s networking configuration to its initial state .""" | # Replace the cached machine data with the handler's refreshed response.
self . _data = await self . _handler . restore_networking_configuration ( system_id = self . system_id ) |
def show_tracebacks(self):
    """Log every collected traceback at ERROR level (no-op when empty)."""
    if not self.broker.tracebacks:
        return
    for traceback_obj in self.broker.tracebacks.values():
        self.logit(str(traceback_obj), self.pid, self.user,
                   "insights-run", logging.ERROR)
def get_or_add_ext_rel(self, reltype, target_ref):
    """Return rId of external relationship of *reltype* to *target_ref*,
    newly added if not already present in collection."""
    existing = self._get_matching(reltype, target_ref, is_external=True)
    if existing is not None:
        return existing.rId
    # Not present yet: mint the next rId and register the relationship.
    new_rel = self.add_relationship(
        reltype, target_ref, self._next_rId, is_external=True)
    return new_rel.rId
def descendants(obj, refattrs=(SEGMENTATION, ALIGNMENT), follow='first'):
    """Yield traversal steps reachable "downward" from *obj* via reference
    attributes.

    Each yielded value is a ``(tier, refattr, reftier, refitems)`` tuple:
    one step from *tier* through *refattr* to *reftier*, where *refitems*
    are the items of *reftier* whose *refattr* expression mentions one of
    the current items.  With ``follow='first'`` only the first populated
    refattr of each tier is expanded.

    >>> for des in query.descendants(igt.get_item('p1'), refattrs=(SEGMENTATION, ALIGNMENT)):
    ...     print(des)
    (<Tier object (id: p type: phrases) at ...>, 'segmentation', <Tier object (id: w type: words) at ...>, [<Item object (id: w1) at ...>])
    (<Tier object (id: p type: phrases) at ...>, 'alignment', <Tier object (id: t type: translations) at ...>, [<Item object (id: t1) at ...>])
    (<Tier object (id: w type: words) at ...>, 'segmentation', <Tier object (id: m type: morphemes) at ...>, [<Item object (id: m1) at ...>])
    (<Tier object (id: m type: morphemes) at ...>, 'alignment', <Tier object (id: g type: glosses) at ...>, [<Item object (id: g1) at ...>])
    """
    # *obj* may be a single item (has a .tier attribute) or a whole tier.
    if hasattr(obj, 'tier'):
        tier = obj.tier
        items = [obj]
    else:
        tier = obj
        items = tier.items
    igt = tier.igt
    visited = set()  # (tier.id, refattr) pairs already expanded (cycle guard)
    agenda = deque([(tier, items)])  # BFS frontier of (tier, items) pairs
    while agenda:
        tier, items = agenda.popleft()
        tier_refs = tier.referrers(refattrs)
        item_ids = set(item.id for item in items)
        # get followable refattrs with something on the referrers list
        ras = [ra for ra in refattrs if tier_refs[ra]]
        if follow == 'first' and ras:
            ras = [ras[0]]
        if not ras:
            continue
        # unlike ancestors, descendants for a refattr may have 1+ tiers
        for refattr in ras:
            # try to avoid cycles
            if (tier.id, refattr) in visited:
                continue
            else:
                visited.add((tier.id, refattr))
            for reftier_id in tier_refs[refattr]:
                reftier = igt[reftier_id]
                # keep only items of the referring tier whose refattr
                # expression references one of the current item ids
                refitems = [
                    item for item in reftier.items
                    if set(ref.ids(item.attributes.get(refattr, ''))).intersection(item_ids)
                ]
                yield (tier, refattr, reftier, refitems)
                agenda.append((reftier, refitems))
def _handle_authn_request(self, context, binding_in, idp):
    """See doc for handle_authn_request method.

    :type context: satosa.context.Context
    :type binding_in: str
    :type idp: saml.server.Server
    :rtype: satosa.response.Response

    :param context: The current context
    :param binding_in: The pysaml binding type
    :param idp: The saml frontend idp server
    :return: response
    """
    req_info = idp.parse_authn_request(context.request["SAMLRequest"], binding_in)
    authn_req = req_info.message
    satosa_logging(logger, logging.DEBUG, "%s" % authn_req, context.state)
    try:
        resp_args = idp.response_args(authn_req)
    except SAMLError as e:
        satosa_logging(logger, logging.ERROR, "Could not find necessary info about entity: %s" % e, context.state)
        return ServiceError("Incorrect request from requester: %s" % e)
    requester = resp_args["sp_entity_id"]
    # FIX: reuse resp_args computed above; the original recomputed
    # idp.response_args(authn_req) a second time here.
    context.state[self.name] = self._create_state_data(
        context, resp_args, context.request.get("RelayState")
    )
    subject = authn_req.subject
    name_id_value = subject.name_id.text if subject else None
    # Candidate NameID formats, resolved in priority order below.
    nameid_formats = {
        "from_policy": authn_req.name_id_policy and authn_req.name_id_policy.format,
        "from_response": subject and subject.name_id and subject.name_id.format,
        "from_metadata": (
            idp.metadata[requester]
            .get("spsso_descriptor", [{}])[0]
            .get("name_id_format", [{}])[0]
            .get("text")
        ),
        "default": NAMEID_FORMAT_TRANSIENT,
    }
    name_id_format = (
        nameid_formats["from_policy"]
        or (nameid_formats["from_response"] != NAMEID_FORMAT_UNSPECIFIED
            and nameid_formats["from_response"])
        or nameid_formats["from_metadata"]
        or nameid_formats["from_response"]
        or nameid_formats["default"]
    )
    requester_name = self._get_sp_display_name(idp, requester)
    internal_req = InternalData(
        subject_id=name_id_value,
        subject_type=name_id_format,
        requester=requester,
        requester_name=requester_name,
    )
    idp_policy = idp.config.getattr("policy", "idp")
    if idp_policy:
        # Filter attributes per the IdP's release policy for this SP.
        internal_req.attributes = self._get_approved_attributes(
            idp, idp_policy, requester, context.state
        )
    return self.auth_req_callback_func(context, internal_req)
def _report_problem(self, problem, level=logging.ERROR):
    '''Report a given problem, prefixed with this dap's basename.

    When strict checking is enabled (``self._check_raises``) the problem
    is raised as DapInvalid instead of being logged.
    '''
    message = self.basename + ': ' + problem
    if not self._logger.isEnabledFor(level):
        return
    self._problematic = True
    if self._check_raises:
        raise DapInvalid(message)
    self._logger.log(level, message)
def read(self, size=-1):
    """Read at most `size` bytes from the file (less if there
    isn't enough data).

    The bytes are returned as an instance of :class:`str` (:class:`bytes`
    in python 3). If `size` is negative or omitted all data is read.

    :Parameters:
      - `size` (optional): the number of bytes to read
    """
    self._ensure_file()
    if size == 0:
        return EMPTY
    # Clamp the request to what remains after the current position.
    remainder = int(self.length) - self.__position
    if size < 0 or size > remainder:
        size = remainder
    received = 0
    data = StringIO()
    # readchunk() advances self.__position; keep pulling whole chunks
    # until at least `size` bytes are buffered.
    while received < size:
        chunk_data = self.readchunk()
        received += len(chunk_data)
        data.write(chunk_data)
    # Detect extra chunks.
    max_chunk_n = math.ceil(self.length / float(self.chunk_size))
    chunk = self.__chunks.find_one({"files_id": self._id, "n": {"$gte": max_chunk_n}})
    # According to spec, ignore extra chunks if they are empty.
    if chunk is not None and len(chunk['data']):
        raise CorruptGridFile("Extra chunk found: expected %i chunks but found "
                              "chunk with n=%i" % (max_chunk_n, chunk['n']))
    # We may have read past `size` (chunks are read whole); rewind the
    # logical position to the byte the caller should see next.
    self.__position -= received - size
    # Return 'size' bytes and store the rest.
    data.seek(size)
    self.__buffer = data.read()
    data.seek(0)
    return data.read(size)
def save(path, im):
    """Save an image to file.

    A float image is assumed to hold values in [0, 1] and is rescaled to
    8-bit before saving.

    Parameters
    ----------
    path : str
        Path to which the image will be saved.
    im : ndarray (image)
        Image.
    """
    from PIL import Image
    pixels = im if im.dtype == np.uint8 else (im * 255).astype(np.uint8)
    Image.fromarray(pixels).save(path)
def kvp_convert(input_coll):
    '''Convert an iterable of strings and/or 2-tuples into an OrderedDict.

    Idempotent when given an OrderedDict.  A tuple element maps
    ``first -> second``; any other element maps ``element -> None``.

    :param input_coll: An iterable with string and/or 2-tuple elements
    :returns: collections.OrderedDict
    '''
    if isinstance(input_coll, OrderedDict):
        return input_coll
    pairs = []
    for element in input_coll:
        if isinstance(element, tuple):
            pairs.append((element[0], element[1]))
        else:
            pairs.append((element, None))
    return OrderedDict(pairs)
def add_response_headers(h):
    """Decorator factory that adds the HTTP headers in *h* to a view's response.

    Example:
        @add_response_headers({'Refresh': '10', 'X-Powered-By': 'Django'})
        def view(request):
            ...
    """
    def headers_wrapper(fun):
        def wrapped_function(*args, **kwargs):
            response = fun(*args, **kwargs)
            # FIX: dict.iteritems() does not exist on Python 3;
            # items() works on both Python 2 and 3.
            for k, v in h.items():
                response[k] = v
            return response
        return wrapped_function
    return headers_wrapper
def evaluate_postfix(tokens):
    """Given a list of evaluatable tokens in postfix format,
    calculate a solution.

    Operands are pushed onto a stack; unary and binary operators pop
    their arguments, apply, and push the result back.

    :raises PostfixTokenEvaluationException: for an unknown binary token,
        or when evaluation leaves the stack empty.
    """
    stack = []
    for token in tokens:
        total = None
        if is_int(token) or is_float(token) or is_constant(token):
            # Operand: push as-is.  NOTE(review): the '+'/'-' arithmetic
            # below assumes these are already numeric values, not strings
            # -- confirm against the tokenizer.
            stack.append(token)
        elif is_unary(token):
            a = stack.pop()
            total = mathwords.UNARY_FUNCTIONS[token](a)
        elif len(stack):
            # Binary operator: the right-hand operand is on top.
            b = stack.pop()
            a = stack.pop()
            if token == '+':
                total = a + b
            elif token == '-':
                total = a - b
            elif token == '*':
                total = a * b
            elif token == '^':
                total = a ** b
            elif token == '/':
                # Decimal avoids binary floating-point artifacts; division
                # by zero yields the string 'undefined' instead of raising.
                if Decimal(str(b)) == 0:
                    total = 'undefined'
                else:
                    total = Decimal(str(a)) / Decimal(str(b))
            else:
                raise PostfixTokenEvaluationException('Unknown token {}'.format(token))
        if total is not None:
            stack.append(total)
    # If the stack is empty the tokens could not be evaluated
    if not stack:
        raise PostfixTokenEvaluationException('The postfix expression resulted in an empty stack')
    return stack.pop()
def scan(self, concurrency=1):
    """Iterate over the applications installed within the disk and query
    the CVE DB to determine whether they are vulnerable.

    *concurrency* controls the amount of concurrent queries against the
    CVE DB.

    For each vulnerable application the method yields a namedtuple:

    VulnApp(name            -> application name
            version         -> application version
            vulnerabilities -> list of Vulnerabilities)

    Vulnerability(id      -> CVE Id
                  summary) -> brief description of the vulnerability
    """
    self.logger.debug("Scanning FS content.")
    with ThreadPoolExecutor(max_workers=concurrency) as executor:
        reports = executor.map(self.query_vulnerabilities, self.applications())
        for application, candidate_vulns in reports:
            confirmed = list(
                lookup_vulnerabilities(application.version, candidate_vulns)
            )
            if confirmed:
                yield VulnApp(application.name, application.version, confirmed)
def get_path(self, path):
    """Construct a Path object from a path string.

    The Path string must be declared in the API.

    :type path: str
    :rtype: lepo.path.Path
    """
    return self.path_class(
        api=self,
        path=path,
        mapping=self.get_path_mapping(path),
    )
def get_input_widget(self, fieldname, arnum=0, **kw):
    """Get the field widget of the AR in column <arnum>.

    Copies the schema field under an ``-<arnum>``-suffixed name, injects
    it (and its stored value) into a temporary AR context, and returns
    the rendered widget for that column.

    :param fieldname: The base fieldname
    :type fieldname: string
    """
    # temporary AR Context
    context = self.get_ar()
    # request = self.request
    schema = context.Schema()
    # get original field in the schema from the base_fieldname
    base_fieldname = fieldname.split("-")[0]
    field = context.getField(base_fieldname)
    # fieldname with -<arnum> suffix
    new_fieldname = self.get_fieldname(field, arnum)
    new_field = field.copy(name=new_fieldname)
    # get the default value for this field
    fieldvalues = self.fieldvalues
    field_value = fieldvalues.get(new_fieldname)
    # request_value = request.form.get(new_fieldname)
    # value = request_value or field_value
    value = field_value
    # Closure so the widget renderer sees the captured column value.
    def getAccessor(instance):
        def accessor(**kw):
            return value
        return accessor
    # inject the new context for the widget renderer
    # see: Products.Archetypes.Renderer.render
    kw["here"] = context
    kw["context"] = context
    kw["fieldName"] = new_fieldname
    # make the field available with this name
    # XXX: This is a hack to make the widget available in the template
    schema._fields[new_fieldname] = new_field
    new_field.getAccessor = getAccessor
    # set the default value
    form = dict()
    form[new_fieldname] = value
    self.request.form.update(form)
    logger.info("get_input_widget: fieldname={} arnum={} "
                "-> new_fieldname={} value={}".format(fieldname, arnum, new_fieldname, value))
    widget = context.widget(new_fieldname, **kw)
    return widget
def encode(data, encoding=None, errors='strict', keep=False,
           preserve_dict_class=False, preserve_tuples=False):
    '''Generic function which will encode whichever type is passed, if necessary.

    If `strict` is True, and `keep` is False, and we fail to encode, a
    UnicodeEncodeError will be raised. Passing `keep` as True allows for the
    original value to silently be returned in cases where encoding fails. This
    can be useful for cases where the data passed to this function is likely to
    contain binary blobs.
    '''
    # Dispatch containers to their dedicated encoders (Mapping checked
    # first, as a Mapping is not a list/tuple).
    if isinstance(data, Mapping):
        return encode_dict(data, encoding, errors, keep,
                           preserve_dict_class, preserve_tuples)
    if isinstance(data, list):
        return encode_list(data, encoding, errors, keep,
                           preserve_dict_class, preserve_tuples)
    if isinstance(data, tuple):
        if preserve_tuples:
            return encode_tuple(data, encoding, errors, keep, preserve_dict_class)
        return encode_list(data, encoding, errors, keep,
                           preserve_dict_class, preserve_tuples)
    try:
        return salt.utils.stringutils.to_bytes(data, encoding, errors)
    except TypeError:
        # to_bytes raises a TypeError when input is not a
        # string/bytestring/bytearray. This is expected and simply
        # means we are going to leave the value as-is.
        pass
    except UnicodeEncodeError:
        if not keep:
            raise
    return data
def _read_softgz(filename) -> AnnData:
    """Read a SOFT format data file.

    The SOFT format is documented here
    http://www.ncbi.nlm.nih.gov/geo/info/soft2.html.

    Notes
    -----
    The function is based on a script by Kerby Shedden.
    http://dept.stat.lsa.umich.edu/~kshedden/Python-Workshop/gene_expression_comparison.html
    """
    filename = str(filename)
    # allow passing pathlib.Path objects
    import gzip
    with gzip.open(filename, mode='rt') as file:
        # The header part of the file contains information about the
        # samples. Read that information first.
        samples_info = {}
        for line in file:
            if line.startswith("!dataset_table_begin"):
                break
            elif line.startswith("!subset_description"):
                subset_description = line.split("=")[1].strip()
            elif line.startswith("!subset_sample_id"):
                # Map every sample id of the subset to the description
                # read on the preceding !subset_description line.
                subset_ids = line.split("=")[1].split(",")
                subset_ids = [x.strip() for x in subset_ids]
                for k in subset_ids:
                    samples_info[k] = subset_description
        # Next line is the column headers (sample id's)
        sample_names = file.readline().strip().split("\t")
        # The column indices that contain gene expression data
        I = [i for i, x in enumerate(sample_names) if x.startswith("GSM")]
        # Restrict the column headers to those that we keep
        sample_names = [sample_names[i] for i in I]
        # Get a list of sample labels
        groups = [samples_info[k] for k in sample_names]
        # Read the gene expression data as a list of lists, also get the gene
        # identifiers
        gene_names, X = [], []
        for line in file:
            # This is what signals the end of the gene expression data
            # section in the file
            if line.startswith("!dataset_table_end"):
                break
            V = line.split("\t")
            # Extract the values that correspond to gene expression measures
            # and convert the strings to numbers
            x = [float(V[i]) for i in I]
            X.append(x)
            gene_names.append(V[1])
    # Convert the Python list of lists to a Numpy array and transpose to match
    # the Scanpy convention of storing samples in rows and variables in colums.
    X = np.array(X).T
    obs = pd.DataFrame({"groups": groups}, index=sample_names)
    var = pd.DataFrame(index=gene_names)
    return AnnData(X=X, obs=obs, var=var)
def weighted_choice(choices, weight):
    """Make a random selection from the specified choices. Apply the *weight*
    function to each to return a positive integer representing shares of
    selection pool the choice should received. The *weight* function is passed a
    single argument of the choice from the *choices* iterable.

    :param choices: The choices to select from.
    :type choices: list, tuple
    :param weight: The function used for gather weight information for choices.
    :type weight: function
    :return: A randomly selected choice from the provided *choices*.
    """
    # requirements = random
    share_counts = []
    # collect and validate the share count of every choice
    for candidate in choices:
        shares = weight(candidate)
        if not (isinstance(shares, int) and shares > 0):
            raise TypeError('weight results must be positive integers')
        share_counts.append(shares)
    # pick a point somewhere in the total pool of shares
    selection = random.randint(0, sum(share_counts) - 1)
    # walk the cumulative totals until the selected point is passed
    running_total = 0
    for shares, candidate in zip(share_counts, choices):
        running_total += shares
        if selection < running_total:
            return candidate
    raise RuntimeError('no selection could be made')
def db_exec_literal(self, sql: str) -> int:
    """Execute *sql* verbatim (no parameter substitution) and return the
    affected row count.

    :param sql: raw SQL statement to execute
    :return: ``cursor.rowcount`` for the executed statement
    :raises: whatever the DB-API driver raises on failure, re-raised
        after logging the offending SQL
    """
    self.ensure_db_open()
    cursor = self.db.cursor()
    debug_sql(sql)
    try:
        cursor.execute(sql)
        return cursor.rowcount
    except Exception:  # FIX: was a bare except, which also trapped
        # SystemExit/KeyboardInterrupt; log only real execution errors.
        log.exception("db_exec_literal: SQL was: " + sql)
        raise
def parse_cl_args(arg_vector):
    '''Parse the command line arguments given in *arg_vector*.'''
    parser = argparse.ArgumentParser(
        description='Compiles markdown files into html files for remark.js'
    )
    parser.add_argument(
        'source',
        metavar='source',
        help='the source to compile. If a directory is provided, all markdown files in that directory are compiled. Output is saved in the current working directory under a md2remark_build subdirectory.',
    )
    return parser.parse_args(arg_vector)
def get_domain_list(list_name):
    '''Retrieves a specific policy domain name list.

    list_name(str): The name of the specific policy domain name list to retrieve.

    CLI Example:

    .. code-block:: bash

        salt '*' bluecoat_sslv.get_domain_list MyDomainNameList
    '''
    payload = {
        "jsonrpc": "2.0",
        "id": "ID0",
        "method": "get_policy_domain_names",
        "params": [list_name, 0, 256],
    }
    reply = __proxy__['bluecoat_sslv.call'](payload, False)
    return _convert_to_list(reply, 'item_name')
def is_visit_primitive(obj):
    '''Returns true if properly visiting the object returns only the object itself.

    Non-string scalar primitives are trivially their own visit; containers
    never are; strings/bytes count only when they have a single element;
    anything else is decided by actually visiting the object.
    '''
    from .base import visit
    # Non-string, non-bytes primitives visit as themselves.
    if (isinstance(obj, tuple(PRIMITIVE_TYPES)) and not isinstance(obj, STR) and not isinstance(obj, bytes)):
        return True
    # Containers expand into their elements, so they are never primitive.
    if (isinstance(obj, CONTAINERS) and not isinstance(obj, STR) and not isinstance(obj, bytes)):
        return False
    # Strings/bytes are sequences: only a single character/byte is primitive.
    if isinstance(obj, STR) or isinstance(obj, bytes):
        if len(obj) == 1:
            return True
        return False
    # Fall back to actually visiting; max_enum=2 caps the enumeration work.
    return list(visit(obj, max_enum=2)) == [obj]
def check_missing_files(client):
    """Find missing files listed in datasets.

    Returns ``True`` when every dataset file exists on disk; otherwise
    prints a warning listing the missing paths per dataset and returns
    ``False``.
    """
    missing = defaultdict(list)
    for path, dataset in client.datasets.items():
        for file in dataset.files:
            filepath = path.parent / file
            if filepath.exists():
                continue
            dataset_key = str(path.parent.relative_to(client.renku_datasets_path))
            missing[dataset_key].append(
                os.path.normpath(str(filepath.relative_to(client.path)))
            )
    if not missing:
        return True
    click.secho(
        WARNING + 'There are missing files in datasets.'
        # '\n (use "renku dataset clean <name>" to clean them)'
    )
    for dataset, files in missing.items():
        click.secho(
            '\n\t' + click.style(dataset, fg='yellow') + ':\n\t '
            + '\n\t '.join(click.style(path, fg='red') for path in files)
        )
    return False
def _reply_json(self, json_payload, status_code=200):
    """Serialize *json_payload* to JSON and write it as the response body.

    :param json_payload: a JSON-serializable data structure
    :param status_code: HTTP status to send with the headers (default 200)
    """
    self._send_headers(status_code=status_code)
    json_str = json.dumps(json_payload)
    # FIX: wfile is a binary stream on Python 3; writing a str raises
    # TypeError. json.dumps defaults to ASCII output, so utf-8 encoding
    # is safe on both Python 2 and 3.
    self.wfile.write(json_str.encode('utf-8'))
def tokens(istr):
    """Same as tokenize, but returns only tokens
    (and at all parentheses levels)."""
    # runs of characters that are not operators/parens, optionally
    # followed by '(' (which marks a function name)
    candidates = re.findall(r'[^\*\\\+\-\^\(\)]+\(?', istr)
    # drop function names, keep plain tokens
    return [tok for tok in candidates if not tok.endswith('(')]
def delete_project(self, project_name):
    """delete project

    Unsuccessful opertaion will cause an LogException.

    :type project_name: string
    :param project_name: the Project name

    :return: DeleteProjectResponse

    :raise: LogException
    """
    resp, header = self._send("DELETE", project_name, None, "/", {}, {})
    return DeleteProjectResponse(header, resp)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.