signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def write_all(self):
    """Write out all registered config files."""
    # Use a plain loop: the original built a throwaway list of Nones via a
    # comprehension purely for its side effects, and used py2-era
    # six.iterkeys() -- iterating the dict directly is equivalent.
    for config_file in self.templates:
        self.write(config_file)
def check(state_engine, nameop, block_id, checked_ops):
    """Verify the validity of an update to a name's associated data.

    Use the nameop's 128-bit name hash to find the name itself.
    NAME_UPDATE isn't allowed during an import, so the name's namespace
    must be ready.

    :param state_engine: chain state used to resolve and validate names
    :param nameop: parsed NAME_UPDATE operation; on acceptance it is
        mutated in place ('name', 'consensus_hash', 'sender_pubkey' added,
        'name_consensus_hash' removed)
    :param block_id: height of the block containing this operation
    :param checked_ops: other operations checked so far (unused here)

    Return True if accepted
    Return False if not.
    """
    name_consensus_hash = nameop['name_consensus_hash']
    sender = nameop['sender']
    # deny updates if we exceed quota -- the only legal operations are to
    # revoke or transfer.
    sender_names = state_engine.get_names_owned_by_sender(sender)
    if len(sender_names) > MAX_NAMES_PER_SENDER:
        log.warning("Sender '%s' has exceeded quota: only transfers or revokes are allowed" % (sender))
        return False
    name, consensus_hash = state_engine.get_name_from_name_consensus_hash(name_consensus_hash, sender, block_id)
    # name must exist
    if name is None or consensus_hash is None:
        log.warning("Unable to resolve name consensus hash '%s' to a name owned by '%s'" % (name_consensus_hash, sender))
        # nothing to do -- write is stale or on a fork
        return False
    namespace_id = get_namespace_from_name(name)
    name_rec = state_engine.get_name(name)
    if name_rec is None:
        log.warning("Name '%s' does not exist" % name)
        return False
    # namespace must be ready (non-existent namespace fails this too)
    if not state_engine.is_namespace_ready(namespace_id):
        log.warning("Namespace '%s' is not ready" % (namespace_id))
        return False
    # name must not be revoked
    if state_engine.is_name_revoked(name):
        log.warning("Name '%s' is revoked" % name)
        return False
    # name must not be expired as of the *last block processed*
    if state_engine.is_name_expired(name, state_engine.lastblock):
        log.warning("Name '%s' is expired" % name)
        return False
    # name must not be in grace period in *this* block
    if state_engine.is_name_in_grace_period(name, block_id):
        log.warning("Name '{}' is in the renewal grace period. It can only be renewed at this time.".format(name))
        return False
    # the name must be registered (i.e. it must currently exist)
    if not state_engine.is_name_registered(name):
        log.warning("Name '%s' is not registered" % name)
        return False
    # the name must be owned by the same person who sent this nameop
    if not state_engine.is_name_owner(name, sender):
        log.warning("Name '%s' is not owned by '%s'" % (name, sender))
        return False
    # remember the name and consensus hash, so we don't have to
    # re-calculate it...
    nameop['name'] = name
    nameop['consensus_hash'] = consensus_hash
    nameop['sender_pubkey'] = name_rec['sender_pubkey']
    # not stored, but re-calculateable
    del nameop['name_consensus_hash']
    return True
def identical_format_path(fmt1, fmt2):
    """Do the two (long representation) of formats target the same file?"""
    # Two formats point at the same file iff every path component agrees;
    # a key missing from both sides compares equal (None == None).
    return all(fmt1.get(field) == fmt2.get(field)
               for field in ('extension', 'prefix', 'suffix'))
def arange_col(n, dtype=int):
    """Returns ``np.arange`` in a column form.

    :param n: Length of the array.
    :type n: int
    :param dtype: Type of the array.
    :type dtype: type
    :returns: ``np.arange`` in a column form.
    :rtype: ndarray
    """
    # build the flat range first, then view it as an (n, 1) column
    column = np.arange(n, dtype=dtype)
    return column.reshape((n, 1))
def run(self, fnc_name, hdrs_usr):
    """Read csv/tsv file and return specified data in a list of lists.

    :param fnc_name: key into self.fncs selecting the per-data-line handler
    :param hdrs_usr: user-requested headers, forwarded to do_hdr
    :return: (self.ret_list, self.hdr2idx) -- the collected rows and the
        header-name -> column-index mapping built from the first line
    """
    fnc = self.fncs[fnc_name]
    with open(self.fin) as fin_stream:
        for lnum, line in enumerate(fin_stream):
            line = line.rstrip('\r\n')  # chomp
            # Obtain data if headers have been collected from the first line
            if self.hdr2idx:
                self._init_data_line(fnc, lnum, line)
            # Obtain the header
            else:
                self.do_hdr(line, hdrs_usr)
    if self.log is not None:
        self.log.write(" {:9} data READ: {}\n".format(len(self.ret_list), self.fin))
    return self.ret_list, self.hdr2idx
def print_tree(sent, token_attr):
    """Print a sentence's dependency tree as a string using token_attr
    from each token (like pos_, tag_ etc.).

    :param sent: sentence to print (must expose a .root token)
    :param token_attr: chosen attr to present for tokens
        (e.g. dep_, pos_, tag_, ...); falsy means use tag_
    """
    def __print_sent__(token, attr):
        # depth-first walk: left children, the token itself, right children.
        # Plain loops replace comprehensions that built throwaway lists
        # purely for their side effects.
        print("{", end=" ")
        for left in token.lefts:
            __print_sent__(left, attr)
        print(u"%s->%s(%s)" % (token, token.dep_, token.tag_ if not attr else getattr(token, attr)), end="")
        for right in token.rights:
            __print_sent__(right, attr)
        print("}", end=" ")
    return __print_sent__(sent.root, token_attr)
def create_new_values(self):
    """Create model instances for the values typed in by the user.

    Each entry in self._new_values is turned into a new model row via
    create_item(); the created rows are then re-fetched from the database.

    :return: QuerySet of the newly created model instances.
    """
    model = self.queryset.model
    pks = []
    # kwargs shared by every created item, beyond the value itself
    extra_create_kwargs = self.extra_create_kwargs()
    for value in self._new_values:
        create_kwargs = {self.create_field: value}
        create_kwargs.update(extra_create_kwargs)
        new_item = self.create_item(**create_kwargs)
        pks.append(new_item.pk)
    # re-query so the result is a real QuerySet over the new rows
    return model.objects.filter(pk__in=pks)
def get_queue_url(queue_name):
    """Return the URL of the SQS queue to send events to."""
    sqs = boto3.client("sqs", CURRENT_REGION)
    response = sqs.get_queue_url(QueueName=queue_name)
    return response["QueueUrl"]
def parse_response(self, connection, command_name, **options):
    "Parses a response from the Redis server"
    try:
        reply = connection.read_response()
    except ResponseError:
        # some commands treat an error reply as a legitimate empty result
        if EMPTY_RESPONSE in options:
            return options[EMPTY_RESPONSE]
        raise
    # post-process through the per-command callback when one is registered
    if command_name in self.response_callbacks:
        callback = self.response_callbacks[command_name]
        return callback(reply, **options)
    return reply
def move_next_cache(self):
    """Moves files in the 'next' cache dir to the root"""
    if not os.path.isdir(self.songcache_next_dir):
        return
    logger.debug("Moving next cache")
    for entry in os.listdir(self.songcache_next_dir):
        source = "{}/{}".format(self.songcache_next_dir, entry)
        target = "{}/{}".format(self.songcache_dir, entry)
        try:
            os.rename(source, target)
        except PermissionError:
            # file is locked elsewhere; leave it for a later pass
            pass
        except Exception as e:
            logger.exception(e)
    logger.debug("Next cache moved")
def mosaicMethod(self, value):
    """get/set the mosaic method"""
    # reject values outside the allowed set first (same short-circuit
    # order as before), then only write when the value actually changes
    if value not in self.__allowedMosaicMethods:
        return
    if self._mosaicMethod != value:
        self._mosaicMethod = value
def _config_bootstrap ( self ) -> None :
"""Handle the basic setup of the tool prior to user control .
Bootstrap will load all the available modules for searching and set
them up for use by this main class .""" | if self . output :
self . folder : str = os . getcwd ( ) + "/" + self . project
os . mkdir ( self . folder ) |
async def create_invite(self, *, reason=None, **fields):
    """|coro|

    Creates an instant invite.

    You must have :attr:`~.Permissions.create_instant_invite` permission
    to do this.

    Parameters
    ----------
    max_age: :class:`int`
        How long the invite should last. If it's 0 then the invite
        doesn't expire. Defaults to 0.
    max_uses: :class:`int`
        How many uses the invite could be used for. If it's 0 then there
        are unlimited uses. Defaults to 0.
    temporary: :class:`bool`
        Denotes that the invite grants temporary membership
        (i.e. they get kicked after they disconnect). Defaults to False.
    unique: :class:`bool`
        Indicates if a unique invite URL should be created. Defaults to
        True. If this is set to False then it will return a previously
        created invite.
    reason: Optional[:class:`str`]
        The reason for creating this invite. Shows up on the audit log.

    Raises
    ------
    HTTPException
        Invite creation failed.

    Returns
    -------
    :class:`Invite`
        The invite that was created.
    """
    # delegate to the HTTP layer; extra keyword fields pass straight through
    data = await self._state.http.create_invite(self.id, reason=reason, **fields)
    return Invite.from_incomplete(data=data, state=self._state)
def post(self, *args, **kwargs):
    """Check if the current step is still available. It might not be if
    conditions have changed.

    Falls back to the last valid step whenever the current (or newly
    requested) step has become unavailable, instead of erroring out.
    """
    if self.steps.current not in self.steps.all:
        logger.warning("Current step '%s' is no longer valid, returning " "to last valid step in the wizard.", self.steps.current)
        return self.render_goto_step(self.steps.all[-1])
    # -- Duplicated code from upstream
    # Look for a wizard_goto_step element in the posted data which
    # contains a valid step name. If one was found, render the requested
    # form. (This makes stepping back a lot easier).
    wizard_goto_step = self.request.POST.get('wizard_goto_step', None)
    if wizard_goto_step and wizard_goto_step in self.get_form_list():
        return self.render_goto_step(wizard_goto_step)
    # Check if form was refreshed
    management_form = ManagementForm(self.request.POST, prefix=self.prefix)
    if not management_form.is_valid():
        raise SuspiciousOperation(_('ManagementForm data is missing or has been tampered with'))
    form_current_step = management_form.cleaned_data['current_step']
    if (form_current_step != self.steps.current and self.storage.current_step is not None):
        # form refreshed, change current step
        self.storage.current_step = form_current_step
    # -- End duplicated code from upstream
    # This is different from the first check, as this checks
    # if the new step is available. See issue #65.
    if self.steps.current not in self.steps.all:
        logger.warning("Requested step '%s' is no longer valid, returning " "to last valid step in the wizard.", self.steps.current)
        return self.render_goto_step(self.steps.all[-1])
    return super(IdempotentSessionWizardView, self).post(*args, **kwargs)
def rotate(matrix, angle):
    r"""Rotate

    This method rotates an input matrix about the input angle.

    Parameters
    ----------
    matrix : np.ndarray
        Input matrix array
    angle : float
        Rotation angle in radians

    Returns
    -------
    np.ndarray rotated matrix

    Raises
    ------
    ValueError
        For invalid matrix shape

    Examples
    --------
    >>> from modopt.math.matrix import rotate
    >>> a = np.arange(9).reshape(3, 3)
    >>> rotate(a, np.pi / 2)
    array([[2, 5, 8],
           [1, 4, 7],
           [0, 3, 6]])
    """
    shape = np.array(matrix.shape)
    if shape[0] != shape[1]:
        raise ValueError('Input matrix must be square.')
    # rotation is performed about the centre element of the matrix
    shift = (shape - 1) // 2
    # every (row, col) index pair, expressed relative to the centre
    index = np.array(list(product(*np.array([np.arange(val) for val in shape])))) - shift
    # apply the rotation matrix, truncate to integer indices, shift back
    new_index = np.array(np.dot(index, rot_matrix(angle)), dtype='int') + shift
    # wrap out-of-range indices around (periodic boundary)
    new_index[new_index >= shape[0]] -= shape[0]
    return matrix[tuple(zip(new_index.T))].reshape(shape.T)
def analysis(self, frames='all', ncpus=1, _ncpus=1, override=False, **kwargs):
    """Perform structural analysis on a frame/set of frames.

    Depending on the passed parameters a frame, a list of particular
    frames, a range of frames (from, to), or all frames can be analysed
    with this function.

    The analysis is performed on each frame and each discrete molecule in
    that frame separately. The steps are as follows:

    1. A frame is extracted and returned as a :class:`MolecularSystem`.
    2. If `swap_atoms` is set the atom ids are swapped.
    3. If `forcefield` is set the atom ids are deciphered.
    4. If `rebuild` is set the molecules in the system are rebuild.
    5. Each discrete molecule is extracted as :class:`Molecule`.
    6. Each molecule is analysed with :func:`Molecule.full_analysis()`.
    7. Analysis output populates the :attr:`analysis_output` dictionary.

    As the analysis of trajectories often have to be unique, many options
    are conditional.

    A side effect of this function is that the analysed frames are also
    returned to the :attr:`frames` mimicking the behaviour of the
    :func:`get_frames()`.

    Parameters
    ----------
    frames : :class:`int` or :class:`list` or :class:`tuple` or :class:`str`
        Specified frame (:class:`int`), or frames (:class:`list`), or
        range (:class:`tuple`), or 'all'/'everything' (:class:`str`).
        (default='all')
    override : :class:`bool`
        If True, an output already stored in :attr:`analysis_output` can
        be overridden. (default=False)
    swap_atoms : :class:`dict`, optional
        If this kwarg is passed with an appropriate dictionary a
        :func:`pywindow.molecular.MolecularSystem.swap_atom_keys()` will
        be applied to the extracted frame.
    forcefield : :class:`str`, optional
        If this kwarg is passed with appropriate forcefield keyword a
        :func:`pywindow.molecular.MolecularSystem.decipher_atom_keys()`
        will be applied to the extracted frame.
    modular : :class:`bool`, optional
        If this kwarg is passed a
        :func:`pywindow.molecular.MolecularSystem.make_modular()`
        will be applied to the extracted frame. (default=False)
    rebuild : :class:`bool`, optional
        If this kwarg is passed a `rebuild=True` is passed to
        :func:`pywindow.molecular.MolecularSystem.make_modular()` that
        will be applied to the extracted frame. (default=False)
    ncpus : :class:`int`, optional
        If ncpus > 1, then the analysis is performed in parallel for the
        specified number of parallel jobs. Otherwise, it runs in serial.
        (default=1)

    Returns
    -------
    None : :class:`NoneType`
        The function returns `None`, the analysis output is
        returned to :attr:`analysis_output` dictionary.
    """
    frames_for_analysis = []
    # First populate the frames_for_analysis list.
    if isinstance(frames, int):
        frames_for_analysis.append(frames)
    if isinstance(frames, list):
        for frame in frames:
            if isinstance(frame, int):
                frames_for_analysis.append(frame)
            else:
                raise _FunctionError("The list should be populated with integers only.")
    if isinstance(frames, tuple):
        if isinstance(frames[0], int) and isinstance(frames[1], int):
            # a (from, to) half-open range of frame indices
            for frame in range(frames[0], frames[1]):
                frames_for_analysis.append(frame)
        else:
            raise _FunctionError("The tuple should contain only two integers " "for the begining and the end of the frames range.")
    if isinstance(frames, str):
        if frames in ['all', 'everything']:
            for frame in range(0, self.no_of_frames):
                frames_for_analysis.append(frame)
        else:
            raise _FunctionError("Didn't recognise the keyword. (see manual)")
    # The override keyword by default is False. So we check if any of the
    # frames were already analysed and if so we delete them from the list.
    # However, if the override is set to True, then we just proceed.
    if override is False:
        frames_for_analysis_new = []
        for frame in frames_for_analysis:
            if frame not in self.analysis_output.keys():
                frames_for_analysis_new.append(frame)
        frames_for_analysis = frames_for_analysis_new
    if ncpus == 1:
        # serial path: analyse frame by frame (each frame may still use
        # _ncpus workers internally)
        for frame in frames_for_analysis:
            analysed_frame = self._analysis_serial(frame, _ncpus, **kwargs)
            self.analysis_output[frame] = analysed_frame
    if ncpus > 1:
        # parallel path populates analysis_output itself
        self._analysis_parallel(frames_for_analysis, ncpus, **kwargs)
def load(self, fobj, index=None):
    """Loads given DataFile object. **tolerant with None**

    Args:
        fobj: object of one of accepted classes
        index: tab index to load fobj into. If not passed, loads into
            current tab
    """
    if index is None:
        index = self._get_tab_index()
    page = self.pages[index]
    if fobj is None:
        # tolerant with None: silently do nothing
        return
    # each page declares which classes it can load (clss_load)
    if not isinstance(fobj, tuple(page.clss_load)):
        raise RuntimeError('Object to load must be in {0!s} (not a {1!s})'.format([x.__name__ for x in page.clss_load], fobj.__class__.__name__))
    page.editor.load(fobj)
    self._update_gui_text_tabs()
def estimate_param_scan(estimator, X, param_sets, evaluate=None, evaluate_args=None, failfast=True, return_estimators=False, n_jobs=1, progress_reporter=None, show_progress=True, return_exceptions=False):
    """Runs multiple estimations using a list of parameter settings.

    Parameters
    ----------
    estimator : Estimator object or class
        An estimator object that provides an estimate(X, **params) function.
        If only a class is provided here, the Estimator objects will be
        constructed with default parameter settings, and the parameter
        settings from param_sets for each estimation. If you want to
        specify other parameter settings for those parameters not specified
        in param_sets, construct an Estimator before and pass the object.
    param_sets : iterable over dictionaries
        An iterable that provides parameter settings. Each element defines
        a parameter set, for which an estimation will be run using these
        parameters in estimate(X, **params). All other parameter settings
        will be taken from the default settings in the estimator object.
    evaluate : str or list of str, optional
        The given methods or properties will be called on the estimated
        models, and their results will be returned instead of the full
        models. This may be useful for reducing memory overhead.
    evaluate_args : iterable of iterable, optional
        Arguments to be passed to evaluated methods. Note, that size has
        to match to the size of evaluate.
    failfast : bool
        If True, will raise an exception when estimation failed with an
        exception or trying to calls a method that doesn't exist. If
        False, will simply return None in these cases.
    return_estimators : bool
        If True, return a list estimators in addition to the models.
    show_progress : bool
        if the given estimator supports show_progress interface, we set
        the flag prior doing estimations.
    return_exceptions : bool, default=False
        if failfast is False while this setting is True, returns the
        exception thrown at the actual grid element, instead of None.

    Returns
    -------
    models : list of model objects or evaluated function values
        A list of estimated models in the same order as param_sets. If
        evaluate is given, each element will contain the results from
        these method evaluations.
    estimators (optional) : list of estimator objects. These are returned
        only if return_estimators=True.
    """
    # make sure we have an estimator object
    estimator = get_estimator(estimator)
    if hasattr(estimator, 'show_progress'):
        estimator.show_progress = show_progress
    if n_jobs is None:
        from pyemma._base.parallel import get_n_jobs
        n_jobs = get_n_jobs(logger=getattr(estimator, 'logger', None))
    # if we want to return estimators, make clones. Otherwise just copy
    # references. For parallel processing we always need clones.
    # Also if the Estimator is its own Model, we have to clone.
    from pyemma._base.model import Model
    if (return_estimators or n_jobs > 1 or n_jobs is None or isinstance(estimator, Model)):
        estimators = [clone_estimator(estimator) for _ in param_sets]
    else:
        estimators = [estimator for _ in param_sets]
    # only show progress of parameter study.
    if hasattr(estimators[0], 'show_progress'):
        for e in estimators:
            e.show_progress = False
    # if we evaluate, make sure we have a list of functions to evaluate
    if _types.is_string(evaluate):
        evaluate = [evaluate]
    if _types.is_string(evaluate_args):
        evaluate_args = [evaluate_args]
    if evaluate is not None and evaluate_args is not None and len(evaluate) != len(evaluate_args):
        raise ValueError("length mismatch: evaluate ({}) and evaluate_args ({})".format(len(evaluate), len(evaluate_args)))
    logger_available = hasattr(estimators[0], 'logger')
    if logger_available:
        logger = estimators[0].logger
    if progress_reporter is None:
        # no reporter supplied: a MagicMock makes every progress/context
        # call below a harmless no-op
        from unittest.mock import MagicMock
        ctx = progress_reporter = MagicMock()
        callback = None
    else:
        ctx = progress_reporter._progress_context('param-scan')
        callback = lambda _: progress_reporter._progress_update(1, stage='param-scan')
        progress_reporter._progress_register(len(estimators), stage='param-scan', description="estimating %s" % str(estimator.__class__.__name__))
    # TODO: test on win, osx
    if n_jobs > 1 and os.name == 'posix':
        if logger_available:
            logger.debug('estimating %s with n_jobs=%s', estimator, n_jobs)
        # iterate over parameter settings
        task_iter = ((estimator, param_set, X, evaluate, evaluate_args, failfast, return_exceptions) for estimator, param_set in zip(estimators, param_sets))
        from pathos.multiprocessing import Pool
        pool = Pool(processes=n_jobs)
        args = list(task_iter)
        from contextlib import closing

        def error_callback(*args, **kw):
            # TODO: can we be specific here? eg. obtain the stack of the
            # actual process or is this the master proc?
            if failfast:
                raise Exception('something failed')
        with closing(pool), ctx:
            res_async = [pool.apply_async(_estimate_param_scan_worker, a, callback=callback, error_callback=error_callback) for a in args]
            res = [x.get() for x in res_async]
    # if n_jobs=1 don't invoke the pool, but directly dispatch the iterator
    else:
        if logger_available:
            logger.debug('estimating %s with n_jobs=1 because of the setting or ' 'you not have a POSIX system', estimator)
        res = []
        with ctx:
            for estimator, param_set in zip(estimators, param_sets):
                res.append(_estimate_param_scan_worker(estimator, param_set, X, evaluate, evaluate_args, failfast, return_exceptions))
                if progress_reporter is not None:
                    progress_reporter._progress_update(1, stage='param-scan')
    # done
    if return_estimators:
        return res, estimators
    else:
        return res
async def setup_hostname() -> str:
    """Intended to be run when the server starts. Sets the machine hostname.

    The machine hostname is set from the systemd-generated machine-id,
    which changes at every boot. Once the hostname is set, we restart
    avahi.

    This is a separate task from establishing and changing the opentrons
    machine name, which is UTF-8 and stored in /etc/machine-info as the
    PRETTY_HOSTNAME and used in the avahi service name.

    :returns: the hostname
    :raises RuntimeError: if hostname or avahi restart fails
    """
    # read via a context manager so the file handle is closed
    # deterministically (the original left it to the GC)
    with open('/etc/machine-id') as machine_id_file:
        machine_id = machine_id_file.read().strip()
    hostname = machine_id[:6]
    with open('/etc/hostname', 'w') as ehn:
        ehn.write(f'{hostname}\n')
    # First, we run hostnamed which will set the transient hostname
    # and loaded static hostname from the value we just wrote to
    # /etc/hostname
    LOG.debug("Setting hostname")
    proc = await asyncio.create_subprocess_exec('hostname', '-F', '/etc/hostname', stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    stdout, stderr = await proc.communicate()
    ret = proc.returncode
    if ret != 0:
        LOG.error(f'Error starting hostname: {ret} ' f'stdout: {stdout} stderr: {stderr}')
        raise RuntimeError("Couldn't run hostname")
    # Then, with the hostname set, we can restart avahi
    LOG.debug("Restarting avahi")
    proc = await asyncio.create_subprocess_exec('systemctl', 'restart', 'avahi-daemon', stdout=asyncio.subprocess.PIPE, stderr=asyncio.subprocess.PIPE)
    stdout, stderr = await proc.communicate()
    ret = proc.returncode
    if ret != 0:
        LOG.error(f'Error restarting avahi-daemon: {ret} ' f'stdout: {stdout} stderr: {stderr}')
        raise RuntimeError("Error restarting avahi")
    LOG.debug("Updated hostname and restarted avahi OK")
    return hostname
def fit_mle(self, data, b=None):
    """%(super)s
    b : float
        The upper bound of the distribution. If None, fixed at sum(data)
    """
    data = np.array(data)
    sample_size = len(data)
    # default the upper bound to the total of the observations
    if not b:
        b = np.sum(data)
    solution = _trunc_logser_solver(sample_size, b)
    return solution, b
def _pick_idp(self, came_from):
    """If more than one idp and if none is selected, I have to do wayf or
    disco.

    Resolution order: ECP (PAOS) clients first, then a previously stored
    discovery cookie, then an explicit IdP query parameter, then WAYF or a
    discovery service, and finally the single configured IdP if only one
    exists.

    :param came_from: URL to return the user to after authentication
    :return: (0, entity_id) on success, or (-1, response object) for an
        error or a redirect the caller must deliver
    """
    _cli = self.sp
    logger.debug("[_pick_idp] %s", self.environ)
    if "HTTP_PAOS" in self.environ:
        if self.environ["HTTP_PAOS"] == PAOS_HEADER_INFO:
            if MIME_PAOS in self.environ["HTTP_ACCEPT"]:
                # Where should I redirect the user to
                # entityid -> the IdP to use
                # relay_state -> when back from authentication
                logger.debug("- ECP client detected -")
                _rstate = rndstr()
                self.cache.relay_state[_rstate] = geturl(self.environ)
                _entityid = _cli.config.ecp_endpoint(self.environ["REMOTE_ADDR"])
                if not _entityid:
                    return -1, ServiceError("No IdP to talk to")
                logger.debug("IdP to talk to: %s", _entityid)
                return ecp.ecp_auth_request(_cli, _entityid, _rstate)
            else:
                return -1, ServiceError("Faulty Accept header")
        else:
            return -1, ServiceError("unknown ECP version")
    # Find all IdPs
    idps = self.sp.metadata.with_descriptor("idpsso")
    idp_entity_id = None
    # an earlier discovery round may have left the choice in a cookie
    kaka = self.environ.get("HTTP_COOKIE", "")
    if kaka:
        try:
            (idp_entity_id, _) = parse_cookie("ve_disco", "SEED_SAW", kaka)
        except ValueError:
            pass
        except TypeError:
            pass
    # Any specific IdP specified in a query part
    query = self.environ.get("QUERY_STRING")
    if not idp_entity_id and query:
        try:
            _idp_entity_id = dict(parse_qs(query))[self.idp_query_param][0]
            # only accept entity ids we actually know about
            if _idp_entity_id in idps:
                idp_entity_id = _idp_entity_id
        except KeyError:
            logger.debug("No IdP entity ID in query: %s", query)
            pass
    if not idp_entity_id:
        if self.wayf:
            if query:
                try:
                    wayf_selected = dict(parse_qs(query))["wayf_selected"][0]
                except KeyError:
                    return self._wayf_redirect(came_from)
                idp_entity_id = wayf_selected
            else:
                return self._wayf_redirect(came_from)
        elif self.discosrv:
            if query:
                idp_entity_id = _cli.parse_discovery_service_response(query=self.environ.get("QUERY_STRING"))
            if not idp_entity_id:
                # start a discovery round: remember where to come back to
                sid_ = sid()
                self.cache.outstanding_queries[sid_] = came_from
                logger.debug("Redirect to Discovery Service function")
                eid = _cli.config.entityid
                ret = _cli.config.getattr("endpoints", "sp")["discovery_response"][0][0]
                ret += "?sid=%s" % sid_
                loc = _cli.create_discovery_service_request(self.discosrv, eid, **{"return": ret})
                return -1, SeeOther(loc)
        elif len(idps) == 1:
            # idps is a dictionary
            idp_entity_id = list(idps.keys())[0]
        elif not len(idps):
            return -1, ServiceError("Misconfiguration")
        else:
            return -1, NotImplemented("No WAYF or DS present!")
    logger.info("Chosen IdP: '%s'", idp_entity_id)
    return 0, idp_entity_id
def parse_metric_family(self, response, scraper_config):
    """Parse the MetricFamily from a valid requests.Response object to
    provide a MetricFamily object (see [0]).

    The text format uses iter_lines() generator.

    :param response: requests.Response
    :param scraper_config: per-scraper configuration dict (filters,
        type overrides, prefix handling)
    :return: core.Metric (yielded one at a time)
    """
    # stream the body line by line rather than loading it whole
    input_gen = response.iter_lines(chunk_size=self.REQUESTS_CHUNK_SIZE, decode_unicode=True)
    if scraper_config['_text_filter_blacklist']:
        # drop blacklisted lines before they ever reach the parser
        input_gen = self._text_filter_input(input_gen, scraper_config)
    for metric in text_fd_to_metric_families(input_gen):
        # honour user-configured type overrides, then skip unsupported types
        metric.type = scraper_config['type_overrides'].get(metric.name, metric.type)
        if metric.type not in self.METRIC_TYPES:
            continue
        metric.name = self._remove_metric_prefix(metric.name, scraper_config)
        yield metric
def server_poweroff(host=None, admin_username=None, admin_password=None, module=None):
    '''Powers down the managed server.

    host
        The chassis host.
    admin_username
        The username used to access the chassis.
    admin_password
        The password used to access the chassis.
    module
        The element to power off on the chassis such as a blade.
        If not provided, the chassis will be powered off.

    CLI Example:

    .. code-block:: bash

        salt dell dracr.server_poweroff
        salt dell dracr.server_poweroff module=server-1
    '''
    # thin wrapper: delegate to the module's shared racadm command executor
    return __execute_cmd('serveraction powerdown', host=host, admin_username=admin_username, admin_password=admin_password, module=module)
def get_plist_data_from_string(data):
    """Parse plist data from a string/bytes. Tries biplist, falling back
    to plistlib.

    :param data: raw plist content
    :return: the parsed plist object, or {} when unparseable
    """
    if has_biplist:
        return biplist.readPlistFromString(data)
    # fall back to normal plistlib. readPlistFromString was removed in
    # Python 3, so prefer plistlib.loads when it exists.
    try:
        if hasattr(plistlib, 'loads'):
            if isinstance(data, str):
                data = data.encode('utf-8')
            return plistlib.loads(data)
        return plistlib.readPlistFromString(data)
    except Exception:
        # not parseable (eg. not well-formed, or binary)
        return {}
def powernode_data(self, name: str) -> Powernode:
    """Return a Powernode object describing the given powernode"""
    # fail fast on unknown powernode names
    self.assert_powernode(name)
    direct_nodes = frozenset(self.nodes_in(name))
    return Powernode(
        size=len(direct_nodes),
        contained=frozenset(self.all_in(name)),
        contained_pnodes=frozenset(self.powernodes_in(name)),
        contained_nodes=direct_nodes,
    )
def validate_proof(proof: List[Keccak256], root: Keccak256, leaf_element: Keccak256) -> bool:
    """Checks that `leaf_element` was contained in the tree represented by
    `merkleroot`.
    """
    # fold the proof siblings into the leaf, bottom-up; a valid proof
    # reproduces the root hash
    current = leaf_element
    for sibling in proof:
        current = hash_pair(current, sibling)
    return current == root
def complete_opt(self, text, line, begidx, endidx):
    """Autocomplete for options"""
    words = line.split()
    # only the command typed so far, plus a partial word: nothing to do yet
    if len(words) == 1 and text:
        return None
    option = words[1] if len(words) > 1 else ""
    # completing the option name itself: offer every opt_* attribute
    if len(words) == 1 or (len(words) == 2 and text):
        prefix = "opt_" + text
        return [attr[4:] + " " for attr in dir(self) if attr.startswith(prefix)]
    # otherwise delegate to a per-option completer when one exists
    handler = getattr(self, "complete_opt_" + option, None)
    if handler is None:
        return None
    return handler(text, line, begidx, endidx)
def _url_chunk_join ( self , * args ) :
"""Join the arguments together to form a predictable URL chunk .""" | # Strip slashes from either side of each path piece .
pathlets = map ( lambda s : s . strip ( '/' ) , args )
# Remove empty pieces .
pathlets = filter ( None , pathlets )
url = '/' . join ( pathlets )
# If this is a directory , add a trailing slash .
if args [ - 1 ] . endswith ( '/' ) :
url = '%s/' % url
return url |
def loadmask(self, filename: str) -> np.ndarray:
    """Load a mask file (MATLAB .mat) and return it as a boolean array.

    :param filename: mask file name, resolved through self.find_file
    :return: boolean mask array from the first non-internal .mat variable
    """
    mask = scipy.io.loadmat(self.find_file(filename, what='mask'))
    # pick the first variable that isn't a MATLAB-internal key
    # (loadmat adds '__header__', '__version__', '__globals__')
    maskkey = [k for k in mask.keys() if not (k.startswith('_') or k.endswith('_'))][0]
    # np.bool was a deprecated alias removed in NumPy 1.24 -- use the
    # builtin bool, which numpy maps to its boolean dtype
    return mask[maskkey].astype(bool)
def create_user(username, password=None, email=None, first_name="", last_name="", role="MEMBER", login_method=None):
    """Create a new user.

    :param username: login name; treated as an email when it contains '@'.
    :param password:
    :param email:
    :param first_name:
    :param last_name:
    :param role: str
    :param login_method: defaults from the shape of ``username``.
    :return: AuthUser
    """
    if not login_method:
        login_method = "email" if "@" in username else "username"

    def make_user():
        record = models.AuthUser.new(
            username=username,
            password=password,
            email=email,
            first_name=first_name,
            last_name=last_name,
            login_method=login_method,
            role=role,
        )
        return _user(record)

    # Creation goes through the signal so listeners can react.
    return signals.create_user(make_user)
def main(self, spin, data):
    """Decode a raw IRC line, split it with the RFC regex into
    prefix/command/arguments, and dispatch the resulting event."""
    text = data.decode(self.encoding)
    matched = re.match(RFC_REG, text)
    if not matched:
        return
    prefix = self.extract_prefix(matched.group('prefix'))
    command = matched.group('command').upper()
    args = self.extract_args(matched.group('arguments'))
    spawn(spin, command, *(prefix + args))
def wait(self, timeout: Optional[float] = None) -> None:
    """Makes the current process wait for the signal. If it is closed, it will join the signal's queue.

    :param timeout:
        If this parameter is not ``None``, it is taken as a delay at the end of which the process times out, and
        stops waiting for the :py:class:`Signal`. In such a situation, a :py:class:`Timeout` exception is raised on
        the process.
    """
    # Logging is optional at module level; only record the wait when a
    # logger has been configured.
    if _logger is not None:
        self._log(INFO, "wait")
    # Keep joining the queue until the signal turns on.
    # NOTE(review): presumably _queue.join(timeout) raises the documented
    # Timeout when `timeout` elapses -- its semantics are not visible here;
    # confirm against the Signal/queue implementation.
    while not self.is_on:
        self._queue.join(timeout)
def forward(self, X):
    """Forward pass combining LSTM text features with sparse features.

    :param X: word-sequence (tensor, mask) pairs for the LSTM, followed by
        the sparse feature tensor and the sparse feature-weight tensor.
    :return: output of the sparse linear layer,
        torch.Tensor of shape (batch_size, num_classes).
    """
    sequences = X[:-2]
    features_in = X[-2]
    feature_weights = X[-1]
    batch_size = len(features_in)
    # Indices 1..lstm_dim address the LSTM outputs within the sparse layer.
    lstm_idx = self._cuda(
        torch.as_tensor(np.arange(1, self.settings["lstm_dim"] + 1)).repeat(batch_size, 1)
    )
    lstm_out = self._cuda(torch.Tensor([]))
    # Run every word sequence through the first LSTM and collect outputs.
    for seq, mask in sequences:
        state_word = self.lstms[0].init_hidden(batch_size)
        out = self.lstms[0].forward(seq, mask, state_word)
        lstm_out = torch.cat((lstm_out, out), 1)
    # Concatenate textual features with the multi-modal features.
    combined_features = torch.cat((lstm_idx, features_in), 1)
    combined_weights = torch.cat((lstm_out, feature_weights), 1)
    return self.sparse_linear(combined_features, combined_weights)
def orient_import2(self, event):
    """Open the dialog that imports an AzDip-format file into the working directory."""
    parent = self.parent
    pmag_menu_dialogs.ImportAzDipFile(parent, parent.WD)
def _inferSchemaFromList(self, data, names=None):
    """Infer schema from list of Row or tuple.

    :param data: list of Row or tuple
    :param names: list of column names
    :return: :class:`pyspark.sql.types.StructType`
    """
    if not data:
        raise ValueError("can not infer schema from empty dataset")
    if type(data[0]) is dict:
        # Dict rows still work but are deprecated upstream.
        warnings.warn("inferring schema from dict is deprecated,"
                      "please use pyspark.sql.Row instead")
    # Merge the per-row schemas into a single StructType.
    schema = reduce(_merge_type, (_infer_schema(row, names) for row in data))
    if _has_nulltype(schema):
        raise ValueError("Some of types cannot be determined after inferring")
    return schema
async def connect(url, *, apikey=None, insecure=False):
    """Connect to a remote MAAS instance with `apikey`.

    Returns a new :class:`Profile` which has NOT been saved. To connect AND
    save a new profile::

        profile = connect(url, apikey=apikey)
        profile = profile.replace(name="mad-hatter")
        with profiles.ProfileStore.open() as config:
            config.save(profile)
            # Optionally, set it as the default.
            config.default = profile.name

    :param url: address of the MAAS instance.
    :param apikey: API key for authenticated access; None for anonymous.
    :param insecure: skip TLS verification when fetching the API description.
    :raises ConnectError: when credentials are embedded in the URL.
    """
    url = api_url(url)
    url = urlparse(url)
    if url.username is not None:
        raise ConnectError(
            "Cannot provide user-name explicitly in URL (%r) when connecting; "
            "use login instead." % url.username)
    if url.password is not None:
        # BUG FIX: this message previously interpolated url.username.
        raise ConnectError(
            "Cannot provide password explicitly in URL (%r) when connecting; "
            "use login instead." % url.password)
    if apikey is None:
        credentials = None  # Anonymous access.
    else:
        credentials = Credentials.parse(apikey)
    description = await fetch_api_description(url, insecure)
    # Return a new (unsaved) profile.
    return Profile(
        name=url.netloc, url=url.geturl(),
        credentials=credentials, description=description)
def unescape(s):
    r"""Inverse of `escape`.

    >>> unescape(r'\x41\n\x42\n\x43')
    'A\nB\nC'
    >>> unescape(r'\u86c7')
    u'\u86c7'
    >>> unescape(u'ah')
    u'ah'
    """
    # Take the unicode path when the string is already unicode or contains
    # an unescaped \u / \U escape (the lookbehind plus (\\\\)* skips
    # backslash-escaped backslashes).
    # NOTE(review): `unicode` exists only on Python 2 -- on Python 3 this
    # line would raise NameError; confirm the intended runtime.
    if re.search(r'(?<!\\)\\(\\\\)*[uU]', s) or isinstance(s, unicode):
        return unescapeUnicode(s)
    else:
        return unescapeAscii(s)
def identify_missing(self, df, check_start=True):
    """Identify missing data in a single-column dataframe.

    Parameters
    ----------
    df : pd.DataFrame
        Dataframe to check for missing data (the first column is used).
    check_start : bool
        When True and the very first observation is missing, that flag is
        reset to 0 so the start of the stream is not reported as the
        beginning of a missing-data event.

    Returns
    -------
    (pd.DataFrame, str)
        Dataframe where 1 indicates missing data and 0 reported data, and
        the name of the column that was checked.
    """
    data_missing = df.isnull() * 1
    col_name = str(data_missing.columns[0])
    # Use 'and' rather than bitwise '&': the original
    # `check_start & value == 1` only worked because '&' binds tighter
    # than '==' and (True & 1) == 1 happened to give the same result.
    # .iloc avoids the chained-assignment pitfall and works for any index.
    if check_start and data_missing[col_name].iloc[0] == 1:
        data_missing.iloc[0, 0] = 0
    return data_missing, col_name
def get_ref_dict(self, schema):
    """Return a dict holding a JSON reference to ``schema`` in the spec,
    wrapped in an array container when the schema is a ``many`` schema."""
    key = make_schema_key(schema)
    reference = build_reference("schema", self.openapi_version.major, self.refs[key])
    if getattr(schema, "many", False):
        return {"type": "array", "items": reference}
    return reference
def get_agents():
    """Provides a list of hostnames/IPs of all agents in the cluster."""
    return [agent["hostname"] for agent in __get_all_agents()]
def generate_message(directory, m):
    '''generate per-message header and implementation file'''
    # 'with' guarantees the file handles are closed even if template
    # expansion raises (the originals were left open on error).
    with open(os.path.join(directory, 'MVMessage%s.h' % m.name_camel_case), mode='w') as f:
        t.write(f, '''
//
// MVMessage${name_camel_case}.h
// MAVLink communications protocol built from ${basename}.xml
//
// Created by mavgen_objc.py
// http://qgroundcontrol.org/mavlink
//
#import "MVMessage.h"
/*!
 Class that represents a ${name} Mavlink message.
 @discussion ${description}
 */
@interface MVMessage${name_camel_case} : MVMessage
- (id)initWithSystemId:(uint8_t)systemId componentId:(uint8_t)componentId${{arg_fields: ${name_lower_camel_case}:(${arg_type}${array_prefix})${name_lower_camel_case}}};
${{fields://! ${description}
- (${return_type})${name_lower_camel_case}${get_arg_objc};
}}
@end
''', m)
    with open(os.path.join(directory, 'MVMessage%s.m' % m.name_camel_case), mode='w') as f:
        t.write(f, '''
//
// MVMessage${name_camel_case}.m
// MAVLink communications protocol built from ${basename}.xml
//
// Created by mavgen_objc.py
// http://qgroundcontrol.org/mavlink
//
#import "MVMessage${name_camel_case}.h"
@implementation MVMessage${name_camel_case}
- (id)initWithSystemId:(uint8_t)systemId componentId:(uint8_t)componentId${{arg_fields: ${name_lower_camel_case}:(${arg_type}${array_prefix})${name_lower_camel_case}}} {
  if ((self = [super init])) {
    mavlink_msg_${name_lower}_pack(systemId, componentId, &(self->_message)${{arg_fields:, ${name_lower_camel_case}}});
  }
  return self;
}
${{fields:- (${return_type})${name_lower_camel_case}${get_arg_objc} {
  ${return_method_implementation}
}
}}
- (NSString *)description {
  return [NSString stringWithFormat:@"%@${{fields:, ${name_lower_camel_case}=${print_format}}}", [super description]${{fields:, ${get_message}}}];
}
@end
''', m)
def _concat(self, data):
    """Concatenate and slice the accepted data types to the defined
    length.
    """
    # Keep only the trailing `self.length` samples: prepend the tail of the
    # existing buffer when the new chunk is short, or trim the chunk when it
    # is longer than the window.  Handles ndarray, DataFrame and
    # dict-of-columns inputs.
    if isinstance(data, np.ndarray):
        data_length = len(data)
        if data_length < self.length:
            prev_chunk = self.data[-(self.length - data_length):]
            data = np.concatenate([prev_chunk, data])
        elif data_length > self.length:
            data = data[-self.length:]
    elif util.pd and isinstance(data, util.pd.DataFrame):
        data_length = len(data)
        if data_length < self.length:
            prev_chunk = self.data.iloc[-(self.length - data_length):]
            data = util.pd.concat([prev_chunk, data])
        elif data_length > self.length:
            data = data.iloc[-self.length:]
    elif isinstance(data, dict) and data:
        # Columns are assumed equal-length; the first one is the reference.
        data_length = len(list(data.values())[0])
        new_data = {}
        for k, v in data.items():
            if data_length < self.length:
                prev_chunk = self.data[k][-(self.length - data_length):]
                new_data[k] = np.concatenate([prev_chunk, v])
            elif data_length > self.length:
                new_data[k] = v[-self.length:]
            else:
                new_data[k] = v
        data = new_data
    # NOTE(review): for any other input (e.g. an empty dict or a list),
    # data_length is never assigned and the next line raises
    # UnboundLocalError -- confirm callers only pass the types above.
    self._chunk_length = data_length
    return data
def purge_archived_resources(user, table):
    """Delete all rows of ``table`` whose state is 'archived'.

    Only super admins may purge; returns an empty 204 response.
    """
    if user.is_not_super_admin():
        raise dci_exc.Unauthorized()
    where_clause = sql.and_(table.c.state == 'archived')
    flask.g.db_conn.execute(table.delete().where(where_clause))
    return flask.Response(None, 204, content_type='application/json')
def get_pillar(opts, grains, minion_id, saltenv=None, ext=None, funcs=None, pillar_override=None, pillarenv=None, extra_minion_data=None):
    '''Return the correct pillar driver based on the file_client option.'''
    file_client = opts['file_client']
    # A masterless minion with a 'remote' file client behaves as local.
    if opts.get('master_type') == 'disable' and file_client == 'remote':
        file_client = 'local'
    driver_cls = {'remote': RemotePillar, 'local': Pillar}.get(file_client, Pillar)
    # If local pillar and we're caching, run through the cache system first.
    log.debug('Determining pillar cache')
    if opts['pillar_cache']:
        log.info('Compiling pillar from cache')
        log.debug('get_pillar using pillar cache with ext: %s', ext)
        return PillarCache(opts, grains, minion_id, saltenv, ext=ext,
                           functions=funcs, pillar_override=pillar_override,
                           pillarenv=pillarenv)
    return driver_cls(opts, grains, minion_id, saltenv, ext,
                      functions=funcs, pillar_override=pillar_override,
                      pillarenv=pillarenv, extra_minion_data=extra_minion_data)
def delete_repository_tag(self, project_id, tag_name):
    """Deletes a tag of a repository with given name.

    :param project_id: The ID of a project
    :param tag_name: The name of a tag
    :return: Dictionary containing delete tag
    :raise: HttpError: If invalid response returned
    """
    path = '/projects/{project_id}/repository/tags/{tag_name}'.format(
        project_id=project_id, tag_name=tag_name)
    return self.delete(path)
def attach_process(self, command, for_legion=False, broken_counter=None, pidfile=None, control=None, daemonize=None, touch_reload=None, signal_stop=None, signal_reload=None, honour_stdin=None, uid=None, gid=None, new_pid_ns=None, change_dir=None):
    """Attaches a command/daemon to the master process.

    This will allow the uWSGI master to control/monitor/respawn this process.

    http://uwsgi-docs.readthedocs.io/en/latest/AttachingDaemons.html

    :param str|unicode command: The command line to execute.

    :param bool for_legion: Legion daemons will be executed only on the legion lord node,
        so there will always be a single daemon instance running in each legion.
        Once the lord dies a daemon will be spawned on another node.

    :param int broken_counter: Maximum attempts before considering a daemon "broken".

    :param str|unicode pidfile: The pidfile path to check (enable smart mode).

    :param bool control: If True, the daemon becomes a `control` one:
        if it dies the whole uWSGI instance dies.

    :param bool daemonize: Daemonize the process (enable smart2 mode).

    :param list|str|unicode touch_reload: List of files to check:
        whenever they are 'touched', the daemon is restarted.

    :param int signal_stop: The signal number to send to the daemon when uWSGI is stopped.

    :param int signal_reload: The signal number to send to the daemon when uWSGI is reloaded.

    :param bool honour_stdin: Directly attach the daemon to stdin.
        NOTE(review): the original doc repeated the ``signal_reload`` text
        here; wording taken from uWSGI's attach-daemon2 option -- confirm.

    :param str|unicode|int uid: Drop privileges to the specified uid.
        .. note:: Requires master running as root.

    :param str|unicode|int gid: Drop privileges to the specified gid.
        .. note:: Requires master running as root.

    :param bool new_pid_ns: Spawn the process in a new pid namespace.
        .. note:: Requires master running as root. Linux only.

    :param str|unicode change_dir: Use chdir() to the specified directory
        before running the command.
    """
    # KeyValue pulls the argument values straight out of locals(), so the
    # names listed in `keys` must match the parameter names above exactly.
    rule = KeyValue(
        locals(),
        keys=[
            'command', 'broken_counter', 'pidfile', 'control', 'daemonize',
            'touch_reload', 'signal_stop', 'signal_reload', 'honour_stdin',
            'uid', 'gid', 'new_pid_ns', 'change_dir',
        ],
        aliases={
            'command': 'cmd',
            'broken_counter': 'freq',
            'touch_reload': 'touch',
            'signal_stop': 'stopsignal',
            'signal_reload': 'reloadsignal',
            'honour_stdin': 'stdin',
            'new_pid_ns': 'ns_pid',
            'change_dir': 'chdir',
        },
        bool_keys=['control', 'daemonize', 'honour_stdin'],
        list_keys=['touch_reload'],
    )
    # 'legion-attach-daemon2' scopes the daemon to the legion lord node.
    prefix = 'legion-' if for_legion else ''
    self._set(prefix + 'attach-daemon2', rule, multi=True)
    return self._section
def add_instance(self, instance):
    """Append a serialised instance (schema: instance.json) to the model.

    Arguments:
        instance (dict): Serialised instance.
    """
    assert isinstance(instance, dict)
    # Layer defaults, then the instance's own data, onto a fresh item.
    item = defaults["common"].copy()
    item.update(defaults["instance"])
    item.update(instance["data"])
    item.update(instance)
    item["itemType"] = "instance"
    item["isToggled"] = instance["data"].get("publish", True)
    item["hasCompatible"] = True
    item["category"] = item["category"] or item["family"]
    self.add_section(item["category"])
    # Families are concatenated for display in Perspective.
    families = [instance["data"]["family"]] + list(instance["data"].get("families", []))
    item["familiesConcatenated"] += ", ".join(families)
    self.instances.append(self.add_item(item))
def timestring(self, pattern="%Y-%m-%d %H:%M:%S", timezone=None):
    """Return this time formatted with ``pattern`` in the given timezone.

    :param pattern="%Y-%m-%d %H:%M:%S"
        strftime/strptime-style format; an ISO-type format by default.
    :param timezone=self.timezone
        The timezone (in seconds west of UTC) to render in. Defaults to
        the timezone used when constructing the instance; use 0 for UTC or
        chronyk.LOCALTZ for the local timezone.
    """
    tz = self.timezone if timezone is None else timezone
    # Shift out both the requested timezone and the local offset before
    # formatting in gmtime terms.
    stamp = self.__timestamp__ - tz - LOCALTZ
    return _strftime(pattern, _gmtime(stamp))
def _get_binop_contexts(context, left, right):
    """Yield two inference contexts for a binary operation: first for
    x.__op__(y), then for y.__rop__(x) — only the arguments are inverted."""
    # Order matters: left.__op__(right) must be produced first.
    for argument in (right, left):
        ctx = context.clone()
        ctx.callcontext = contextmod.CallContext(args=[argument])
        ctx.boundnode = None
        yield ctx
def _synthesize_multiple_python(self, text_file, output_file_path, quit_after=None, backwards=False):
    """Synthesize multiple fragments via a Python call.

    :rtype: tuple (result, (anchors, current_time, num_chars))
    """
    self.log(u"Synthesizing multiple via a Python call...")
    result = self._synthesize_multiple_generic(
        helper_function=self._synthesize_single_python_helper,
        text_file=text_file,
        output_file_path=output_file_path,
        quit_after=quit_after,
        backwards=backwards,
    )
    self.log(u"Synthesizing multiple via a Python call... done")
    return result
def namespace_from_url(url):
    """Construct a dotted (reversed-hostname) namespace string from a URL."""
    parsed = urlparse(url)
    host = parsed.hostname
    # Local or numeric (IPv4) hosts carry no usable namespace.
    if host is None or host in ['localhost', 'localhost.localdomain'] or _ipv4_re.search(host):
        return None
    parts = host.split('.')
    parts.reverse()
    if parts and not parts[0]:
        parts.pop(0)
    if parts and parts[-1] == 'www':
        parts.pop(-1)
    # Preserve the input's string type.
    return type(url)('.'.join(parts))
def visit_Assignment(self, node):
    """Visitor for `Assignment` AST node."""
    # Object previously bound to this identifier, if any.
    # NOTE(review): a plain dict would KeyError for an unseen identifier --
    # presumably self.memory defaults missing names to None; confirm.
    obj_memory = self.memory[node.left.identifier.name]
    obj_program = self.visit(node.right)
    if obj_memory is not None:
        # Reuse the existing object and only replace its value, so any
        # other references to that object observe the assignment.
        obj_program_value = obj_program.value
        obj_program = obj_memory
        obj_program.value = obj_program_value
    self.memory[node.left.identifier.name] = obj_program
def chi_perp_from_mass1_mass2_xi2(mass1, mass2, xi2):
    """Returns the in-plane spin of the secondary from mass1, mass2, and xi2."""
    q = q_from_mass1_mass2(mass1, mass2)
    primary_factor = 2 + 3 * q / 2
    secondary_factor = 2 + 3 / (2 * q)
    return q ** 2 * secondary_factor / primary_factor * xi2
def connect_head_node_proxy_with_path(self, name, path, **kwargs):  # noqa: E501
    """connect HEAD requests to proxy of Node.  # noqa: E501

    Synchronous by default; pass async_req=True for an asynchronous request:
    >>> thread = api.connect_head_node_proxy_with_path(name, path, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str name: name of the NodeProxyOptions (required)
    :param str path: path to the resource (required)
    :param str path2: Path is the URL path to use for the current proxy request to node.
    :return: str
        If the method is called asynchronously, returns the request thread.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async branches delegate to the same *_with_http_info
    # call; async_req is forwarded through kwargs.
    return self.connect_head_node_proxy_with_path_with_http_info(name, path, **kwargs)  # noqa: E501
def get_scoped_package_version_metadata_from_recycle_bin(self, feed_id, package_scope, unscoped_package_name, package_version):
    """GetScopedPackageVersionMetadataFromRecycleBin.

    [Preview API] Get information about a scoped package version in the recycle bin.

    :param str feed_id: Name or ID of the feed.
    :param str package_scope: Scope of the package (the 'scope' part of @scope/name)
    :param str unscoped_package_name: Name of the package (the 'name' part of @scope/name).
    :param str package_version: Version of the package.
    :rtype: :class:`<NpmPackageVersionDeletionState> <azure.devops.v5_0.npm.models.NpmPackageVersionDeletionState>`
    """
    # Only non-None values are serialized into the route.
    route_values = {}
    for route_key, value, param_name in (
        ('feedId', feed_id, 'feed_id'),
        ('packageScope', package_scope, 'package_scope'),
        ('unscopedPackageName', unscoped_package_name, 'unscoped_package_name'),
        ('packageVersion', package_version, 'package_version'),
    ):
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, 'str')
    response = self._send(http_method='GET',
                          location_id='220f45eb-94a5-432c-902a-5b8c6372e415',
                          version='5.0-preview.1',
                          route_values=route_values)
    return self._deserialize('NpmPackageVersionDeletionState', response)
def get_locale():
    """Search the default platform locale and norm it.

    @returns (locale, encoding)
    @rtype (string, string)
    """
    try:
        loc, encoding = locale.getdefaultlocale()
    except ValueError:
        # Broken locale configuration -- fall back to safe defaults.
        loc, encoding = None, None
    loc = "C" if loc is None else norm_locale(loc)
    if encoding is None:
        encoding = "ascii"
    return (loc, encoding)
def build_layers(node, disambiguate_names=True):
    """Return a list of GeoJSON FeatureCollections, one for each folder in
    the given KML DOM node that contains geodata, each named (via a 'name'
    attribute) after its KML folder.

    If ``disambiguate_names == True``, repeated layer names are made unique
    via :func:`disambiguate`.

    Warning: this can produce layers with the same geodata in case the KML
    node has nested folders with geodata.
    """
    layers = []
    names = []

    def collect(source, layer_name):
        # Keep only collections that actually contain features.
        geojson = build_feature_collection(source, layer_name)
        if geojson['features']:
            layers.append(geojson)
            names.append(layer_name)

    for folder in get(node, 'Folder'):
        collect(folder, val(get1(folder, 'name')))
    if not layers:
        # No folders with geodata, so fall back to the root node.
        collect(node, val(get1(node, 'name')))
    if disambiguate_names:
        unique_names = disambiguate(names)
        for i, layer in enumerate(layers):
            layer['name'] = unique_names[i]
    return layers
def run_command_line(args=None):
    """Entry point for the FlowCal and flowcal console scripts.

    Parameters
    ----------
    args : list of strings, optional
        Command line arguments. If None or not specified, get arguments
        from ``sys.argv``.

    See Also
    --------
    FlowCal.excel_ui.run()
    http://amir.rachum.com/blog/2017/07/28/python-entry-points/
    """
    # ``sys.argv[0]`` is the script name; it must not reach parse_args().
    if args is None:
        args = sys.argv[1:]
    import argparse
    parser = argparse.ArgumentParser(
        description="process flow cytometry files with FlowCal's Excel UI.")
    parser.add_argument("-i", "--inputpath", type=str, nargs='?',
                        help="input Excel file name. If not specified, show open file window")
    parser.add_argument("-o", "--outputpath", type=str, nargs='?',
                        help="output Excel file name. If not specified, use [INPUTPATH]_output")
    parser.add_argument("-v", "--verbose", action="store_true",
                        help="print information about individual processing steps")
    parser.add_argument("-p", "--plot", action="store_true",
                        help="generate and save density plots/histograms of beads and samples")
    parser.add_argument("-H", "--histogram-sheet", action="store_true",
                        help="generate sheet in output Excel file specifying histogram bins")
    parsed = parser.parse_args(args=args)
    # Run Excel UI
    run(input_path=parsed.inputpath,
        output_path=parsed.outputpath,
        verbose=parsed.verbose,
        plot=parsed.plot,
        hist_sheet=parsed.histogram_sheet)
def extract_bugs(changelog):
    """Extract bug numbers from ``git log --oneline`` output.

    :param changelog: iterable of changelog lines.
    :return: sorted list of unique bug-number strings.
    """
    pattern = re.compile(r'\bbug (\d+)\b', re.IGNORECASE)
    found = set()
    for entry in changelog:
        found.update(pattern.findall(entry))
    return sorted(found)
def _bca ( ab_estimates , sample_point , n_boot , alpha = 0.05 ) :
"""Get ( 1 - alpha ) * 100 bias - corrected confidence interval estimate
Note that this is similar to the " cper " module implemented in
: py : func : ` pingouin . compute _ bootci ` .
Parameters
ab _ estimates : 1d array - like
Array with bootstrap estimates for each sample .
sample _ point : float
Indirect effect point estimate based on full sample .
n _ boot : int
Number of bootstrap samples
alpha : float
Alpha for confidence interval
Returns
CI : 1d array - like
Lower limit and upper limit bias - corrected confidence interval
estimates .""" | # Bias of bootstrap estimates
z0 = norm . ppf ( np . sum ( ab_estimates < sample_point ) / n_boot )
# Adjusted intervals
adjusted_ll = norm . cdf ( 2 * z0 + norm . ppf ( alpha / 2 ) ) * 100
adjusted_ul = norm . cdf ( 2 * z0 + norm . ppf ( 1 - alpha / 2 ) ) * 100
ll = np . percentile ( ab_estimates , q = adjusted_ll )
ul = np . percentile ( ab_estimates , q = adjusted_ul )
return np . array ( [ ll , ul ] ) |
def community_topic_subscription_create(self, topic_id, data, **kwargs):
    "https://developer.zendesk.com/rest_api/docs/help_center/subscriptions#create-topic-subscription"
    api_path = "/api/v2/community/topics/{topic_id}/subscriptions.json".format(topic_id=topic_id)
    return self.call(api_path, method="POST", data=data, **kwargs)
def user(self, **params):
    """Stream user.

    Accepted params found at:
    https://dev.twitter.com/docs/api/1.1/get/user
    """
    endpoint = 'https://userstream.twitter.com/%s/user.json' % self.streamer.api_version
    self.streamer._request(endpoint, params=params)
def poll_integration_information_for_waiting_integration_alerts():
    """poll_integration_information_for_waiting_integration_alerts."""
    if not polling_integration_alerts:
        return
    logger.debug("Polling information for waiting integration alerts")
    for integration_alert in polling_integration_alerts:
        integration = integration_alert.configured_integration.integration
        # Alerts that waited longer than the integration's polling window
        # are marked errored; the rest are (re)polled.
        waited = get_current_datetime_utc() - integration_alert.send_time
        if waited > integration.polling_duration:
            logger.debug("Polling duration expired for integration alert %s", integration_alert)
            integration_alert.status = IntegrationAlertStatuses.ERROR_POLLING.name
        else:
            integration_alert.status = IntegrationAlertStatuses.IN_POLLING.name
            poll_integration_alert_data(integration_alert)
def rosmsg(self):
    """:obj:`sensor_msgs.CameraInfo`: Returns ROS CameraInfo msg."""
    from sensor_msgs.msg import CameraInfo, RegionOfInterest
    from std_msgs.msg import Header

    msg_header = Header()
    msg_header.frame_id = self._frame

    # All-zero ROI means "full image, no rectification" per CameraInfo docs.
    msg_roi = RegionOfInterest()
    msg_roi.x_offset = 0
    msg_roi.y_offset = 0
    msg_roi.height = 0
    msg_roi.width = 0
    msg_roi.do_rectify = 0

    msg = CameraInfo()
    msg.header = msg_header
    msg.height = self._height
    msg.width = self._width
    msg.distortion_model = 'plumb_bob'
    msg.D = [0.0, 0.0, 0.0, 0.0, 0.0]
    msg.K = [self._fx, 0.0, self._cx,
             0.0, self._fy, self._cy,
             0.0, 0.0, 1.0]
    msg.R = [1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0]
    # BUG FIX: P[5] must be fy (it previously reused fx). The 3x4
    # projection matrix is [fx 0 cx Tx; 0 fy cy Ty; 0 0 1 0], matching K.
    msg.P = [self._fx, 0.0, self._cx, 0.0,
             0.0, self._fy, self._cy, 0.0,
             0.0, 0.0, 1.0, 0.0]
    msg.binning_x = 0
    msg.binning_y = 0
    msg.roi = msg_roi
    return msg
def parse(source, segmenter='nlapi', language=None, max_length=None, classname=None, attributes=None, **kwargs):
    """Parses input source.

    Args:
        source (str): Input source to process.
        segmenter (:obj:`str`, optional): Segmenter to use [default: nlapi].
        language (:obj:`str`, optional): Language code.
        max_length (:obj:`int`, optional): Maximum length of a chunk.
        classname (:obj:`str`, optional): Class name of output SPAN tags.
        attributes (:obj:`dict`, optional): Attributes for output SPAN tags.

    Returns:
        Results in a dict. :code:`chunks` holds a list of chunks
        (:obj:`budou.chunk.ChunkList`) and :code:`html_code` holds the
        output HTML code.
    """
    backend = get_parser(segmenter, **kwargs)
    return backend.parse(source,
                         language=language,
                         max_length=max_length,
                         classname=classname,
                         attributes=attributes)
def AddrStrToScriptHash(address):
    """Convert a public address to a script hash.

    Args:
        address (str): base58check encoded public address.

    Raises:
        ValueError: if the address length or address version is incorrect.
        Exception: if the address checksum fails.

    Returns:
        UInt160:
    """
    data = b58decode(address)
    if len(data) != 25:
        raise ValueError('Not correct Address, wrong length.')
    if data[0] != settings.ADDRESS_VERSION:
        raise ValueError('Not correct Coin Version')
    # Checksum = first 4 bytes of double-SHA256 over version byte + hash160.
    expected = Crypto.Default().Hash256(data[:21])[:4]
    if expected != data[21:]:
        raise Exception('Address format error')
    return UInt160(data=data[1:21])
def ListDescendentPathInfos(self, client_id, path_type, components, timestamp=None, max_depth=None, cursor=None):
    """Lists path info records that correspond to descendants of given path.

    Args:
        client_id: Client id string (converted to an integer for storage).
        path_type: Type of the paths to list.
        components: Path components of the ancestor whose descendants are
            listed.
        timestamp: If set, stat/hash entries are resolved as of this time
            instead of the latest ones, and only "explicit" paths are
            returned (see below).
        max_depth: If set, restricts results to at most this many levels
            below `components`.
        cursor: MySQL cursor used to execute the query.

    Returns:
        A list of rdf_objects.PathInfo sorted by path components.
    """
    path_infos = []
    query = ""
    path = mysql_utils.ComponentsToPath(components)
    values = {
        "client_id": db_utils.ClientIDToInt(client_id),
        "path_type": int(path_type),
        # Escaped so '%'/'_' in the path do not act as LIKE wildcards below.
        "path": db_utils.EscapeWildcards(path),
    }
    query += """
    SELECT path, directory, UNIX_TIMESTAMP(p.timestamp),
           stat_entry, UNIX_TIMESTAMP(last_stat_entry_timestamp),
           hash_entry, UNIX_TIMESTAMP(last_hash_entry_timestamp)
    FROM client_paths AS p
    """
    if timestamp is None:
        # Latest view: join stat/hash tables directly on the last-entry
        # timestamps cached in client_paths.
        query += """
        LEFT JOIN client_path_stat_entries AS s ON
                  (p.client_id = s.client_id AND
                   p.path_type = s.path_type AND
                   p.path_id = s.path_id AND
                   p.last_stat_entry_timestamp = s.timestamp)
        LEFT JOIN client_path_hash_entries AS h ON
                  (p.client_id = h.client_id AND
                   p.path_type = h.path_type AND
                   p.path_id = h.path_id AND
                   p.last_hash_entry_timestamp = h.timestamp)
        """
        only_explicit = False
    else:
        # Historical view: for each path pick the newest stat/hash entry
        # whose timestamp does not exceed the requested one.
        query += """
        LEFT JOIN (SELECT sr.client_id, sr.path_type, sr.path_id, sr.stat_entry
                   FROM client_path_stat_entries AS sr
                   INNER JOIN (SELECT client_id, path_type, path_id,
                                      MAX(timestamp) AS max_timestamp
                               FROM client_path_stat_entries
                               WHERE UNIX_TIMESTAMP(timestamp) <= %(timestamp)s
                               GROUP BY client_id, path_type, path_id) AS st
                   ON sr.client_id = st.client_id
                   AND sr.path_type = st.path_type
                   AND sr.path_id = st.path_id
                   AND sr.timestamp = st.max_timestamp) AS s
        ON (p.client_id = s.client_id AND
            p.path_type = s.path_type AND
            p.path_id = s.path_id)
        LEFT JOIN (SELECT hr.client_id, hr.path_type, hr.path_id, hr.hash_entry
                   FROM client_path_hash_entries AS hr
                   INNER JOIN (SELECT client_id, path_type, path_id,
                                      MAX(timestamp) AS max_timestamp
                               FROM client_path_hash_entries
                               WHERE UNIX_TIMESTAMP(timestamp) <= %(timestamp)s
                               GROUP BY client_id, path_type, path_id) AS ht
                   ON hr.client_id = ht.client_id
                   AND hr.path_type = ht.path_type
                   AND hr.path_id = ht.path_id
                   AND hr.timestamp = ht.max_timestamp) AS h
        ON (p.client_id = h.client_id AND
            p.path_type = h.path_type AND
            p.path_id = h.path_id)
        """
        values["timestamp"] = mysql_utils.RDFDatetimeToTimestamp(timestamp)
        only_explicit = True
    # Restrict to strict descendants of the given path.
    query += """
    WHERE p.client_id = %(client_id)s
    AND p.path_type = %(path_type)s
    AND path LIKE concat(%(path)s, '/%%')
    """
    if max_depth is not None:
        query += """
        AND depth <= %(depth)s
        """
        values["depth"] = len(components) + max_depth
    cursor.execute(query, values)
    for row in cursor.fetchall():  # pyformat: disable
        # NOTE: the `timestamp` and `components` parameters are shadowed by
        # the per-row values from here on.
        (path, directory, timestamp, stat_entry_bytes, last_stat_entry_timestamp, hash_entry_bytes, last_hash_entry_timestamp) = row
        # pyformat: enable
        components = mysql_utils.PathToComponents(path)
        if stat_entry_bytes is not None:
            stat_entry = rdf_client_fs.StatEntry.FromSerializedString(stat_entry_bytes)
        else:
            stat_entry = None
        if hash_entry_bytes is not None:
            hash_entry = rdf_crypto.Hash.FromSerializedString(hash_entry_bytes)
        else:
            hash_entry = None
        # Shorthand for converting DB timestamps to RDF datetimes.
        datetime = mysql_utils.TimestampToRDFDatetime
        path_info = rdf_objects.PathInfo(path_type=path_type, components=components, timestamp=datetime(timestamp), last_stat_entry_timestamp=datetime(last_stat_entry_timestamp), last_hash_entry_timestamp=datetime(last_hash_entry_timestamp), directory=directory, stat_entry=stat_entry, hash_entry=hash_entry)
        path_infos.append(path_info)
    # Sort lexicographically by path components.
    path_infos.sort(key=lambda _: tuple(_.components))
    # For specific timestamp, we return information only about explicit paths
    # (paths that have associated stat or hash entry or have an ancestor that is
    # explicit).
    if not only_explicit:
        return path_infos
    explicit_path_infos = []
    has_explicit_ancestor = set()
    # This list is sorted according to the keys component, so by traversing it
    # in the reverse order we make sure that we process deeper paths first.
    for path_info in reversed(path_infos):
        components = tuple(path_info.components)
        if (path_info.HasField("stat_entry") or path_info.HasField("hash_entry") or components in has_explicit_ancestor):
            explicit_path_infos.append(path_info)
            has_explicit_ancestor.add(components[:-1])
    # Since we collected explicit paths in reverse order, we need to reverse it
    # again to conform to the interface.
    return list(reversed(explicit_path_infos))
def experimental(name=None):
    """A simple decorator to mark functions and methods as experimental.

    Emits an ``ExperimentalWarning`` (naming the wrapped function, or
    `name` when given) on every call, then delegates to the function.
    """
    def decorate(func):
        @functools.wraps(func)
        def wrapped(*args, **kwargs):
            label = func.__name__ if name is None else name
            warnings.warn("%s" % label, category=ExperimentalWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapped
    return decorate
def transform(self, transform, node):
    """Transforms a node following the transform specification.

    :param transform: Transform node (element carrying the Algorithm attribute)
    :type transform: lxml.etree.Element
    :param node: Serialized element to transform
    :type node: str
    :return: Transformed node in a String (bytes for the base64 transform)
    """
    method = transform.get('Algorithm')
    if method not in constants.TransformUsageDSigTransform:
        raise Exception('Method not allowed')
    # C14N methods are allowed
    if method in constants.TransformUsageC14NMethod:
        return self.canonicalization(method, etree.fromstring(node))
    # Enveloped method removes the Signature node from the element
    if method == constants.TransformEnveloped:
        tree = transform.getroottree()
        root = etree.fromstring(node)
        # Climb four levels from this Transform to reach the Signature
        # element in the original tree, then locate the corresponding
        # element in the freshly parsed copy via its tree path.
        signature = root.find(tree.getelementpath(transform.getparent().getparent().getparent().getparent()))
        root.remove(signature)
        return self.canonicalization(constants.TransformInclC14N, root)
    if method == constants.TransformBase64:
        try:
            # If the payload parses as XML, decode its text content ...
            root = etree.fromstring(node)
            return base64.b64decode(root.text)
        except Exception:
            # ... otherwise treat the raw input itself as base64 data.
            return base64.b64decode(node)
    raise Exception('Method not found')
def as_dict(self):
    """Return a dictionary containing the current values of the object.

    Returns:
        (dict): The object represented as a dictionary
    """
    result = {}
    for name in self:
        value = getattr(self, name)
        if hasattr(value, 'for_json'):
            result[name] = value.for_json()
        elif isinstance(value, list):
            # Items without a `for_json` fall back to themselves.
            result[name] = [getattr(item, 'for_json', lambda: item)() for item in value]
        elif isinstance(value, (ProtocolBase, LiteralValue)):
            result[name] = value.as_dict()
        elif value is not None:
            result[name] = value
    return result
def _create_PmtInf_node(self):
    """Method to create the blank payment information nodes as a dict.

    The BIC node is only created when a 'BIC' entry is present in the
    configuration.
    """
    # (dict key, XML tag) pairs, in document order.
    spec = [
        ('PmtInfNode', 'PmtInf'),
        ('PmtInfIdNode', 'PmtInfId'),
        ('PmtMtdNode', 'PmtMtd'),
        ('BtchBookgNode', 'BtchBookg'),
        ('NbOfTxsNode', 'NbOfTxs'),
        ('CtrlSumNode', 'CtrlSum'),
        ('PmtTpInfNode', 'PmtTpInf'),
        ('SvcLvlNode', 'SvcLvl'),
        ('Cd_SvcLvl_Node', 'Cd'),
        ('LclInstrmNode', 'LclInstrm'),
        ('Cd_LclInstrm_Node', 'Cd'),
        ('SeqTpNode', 'SeqTp'),
        ('ReqdColltnDtNode', 'ReqdColltnDt'),
        ('CdtrNode', 'Cdtr'),
        ('Nm_Cdtr_Node', 'Nm'),
        ('CdtrAcctNode', 'CdtrAcct'),
        ('Id_CdtrAcct_Node', 'Id'),
        ('IBAN_CdtrAcct_Node', 'IBAN'),
        ('CdtrAgtNode', 'CdtrAgt'),
        ('FinInstnId_CdtrAgt_Node', 'FinInstnId'),
    ]
    if 'BIC' in self._config:
        spec.append(('BIC_CdtrAgt_Node', 'BIC'))
    spec += [
        ('ChrgBrNode', 'ChrgBr'),
        ('CdtrSchmeIdNode', 'CdtrSchmeId'),
        ('Nm_CdtrSchmeId_Node', 'Nm'),
        ('Id_CdtrSchmeId_Node', 'Id'),
        ('PrvtIdNode', 'PrvtId'),
        ('OthrNode', 'Othr'),
        ('Id_Othr_Node', 'Id'),
        ('SchmeNmNode', 'SchmeNm'),
        ('PrtryNode', 'Prtry'),
    ]
    # ED is element dict
    ED = {key: ET.Element(tag) for key, tag in spec}
    return ED
def CallState(self, next_state="", start_time=None):
    """This method is used to schedule a new state on a different worker.

    This is basically the same as CallFlow() except we are calling
    ourselves. The state will be invoked at a later time.

    Args:
        next_state: The state in this flow to be invoked.
        start_time: Start the flow at this time. This delays notification for
            flow processing into the future. Note that the flow may still be
            processed earlier if there are client responses waiting.

    Raises:
        FlowRunnerError: if the next state is not valid.
    """
    # Check if the state is valid. NOTE(review): getattr without a default
    # raises AttributeError (not FlowRunnerError) for a missing state — the
    # explicit raise below only fires for falsy attributes; confirm intent.
    if not getattr(self.flow_obj, next_state):
        raise FlowRunnerError("Next state %s is invalid." % next_state)
    # Queue the response message to the parent flow
    request_state = rdf_flow_runner.RequestState(id=self.GetNextOutboundId(), session_id=self.context.session_id, client_id=self.runner_args.client_id, next_state=next_state)
    self.QueueRequest(request_state, timestamp=start_time)
    # Send a fake reply: a STATUS message marks the request complete so the
    # scheduled state can actually run.
    msg = rdf_flows.GrrMessage(session_id=self.session_id, request_id=request_state.id, response_id=1, auth_state=rdf_flows.GrrMessage.AuthorizationState.AUTHENTICATED, payload=rdf_flows.GrrStatus(), type=rdf_flows.GrrMessage.Type.STATUS)
    self.QueueResponse(msg, start_time)
    # Notify the worker about it.
    self.QueueNotification(session_id=self.session_id, timestamp=start_time)
def get_tag(expnum, key):
    """Given a key, return the vospace tag value.

    @param expnum: Number of the CFHT exposure that a tag value is needed for
    @param key: The process tag (such as mkpsf_00) that is being looked up.
    @return: the value of the tag
    @rtype: str
    """
    uri = tag_uri(key)
    # Force a refresh of the cached tag listing when the key is missing.
    refresh = uri not in get_tags(expnum)
    return get_tags(expnum, force=refresh).get(uri, None)
def add(TargetGroup, NewMember, Config=None, Args=None):
    r"""Adds members to an existing group.

    Args:
        TargetGroup (Group): The target group for the addition.
        NewMember (Group/Task): The member to be added.
        Config (dict): The config for the member.
        Args (OrderedDict): ArgConfig for the NewMember, if it's a task (optional).
    """
    # Functions become Tasks; anything else is treated as a Group.
    if isfunction(NewMember):
        member = Task(NewMember, Args or {}, Config or {})
    else:
        member = Group(NewMember, Config or {})
    members = TargetGroup.__ec_member__.Members
    members[member.Config['name']] = member
    # Register the member under its alias too, when one is configured.
    alias = member.Config.get('alias')
    if alias:
        members[alias] = member
def ppaged(self, msg: str, end: str = '\n', chop: bool = False) -> None:
    """Print output using a pager if it would go off screen and stdout isn't currently being redirected.

    Never uses a pager inside of a script (Python or text) or when output is being redirected or piped or when
    stdout or stdin are not a fully functional terminal.

    :param msg: message to print to current stdout (anything convertible to a str with '{}'.format() is OK)
    :param end: string appended after the end of the message if not already present, default a newline
    :param chop: True -> causes lines longer than the screen width to be chopped (truncated) rather than wrapped
                 - truncated text is still accessible by scrolling with the right & left arrow keys
                 - chopping is ideal for displaying wide tabular data as is done in utilities like pgcli
                 False -> causes lines longer than the screen width to wrap to the next line
                 - wrapping is ideal when you want to keep users from having to use horizontal scrolling
                 WARNING: On Windows, the text always wraps regardless of what the chop argument is set to
    """
    import subprocess
    if msg is not None and msg != '':
        try:
            msg_str = '{}'.format(msg)
            if not msg_str.endswith(end):
                msg_str += end
            # Attempt to detect if we are not running within a fully functional terminal.
            # Don't try to use the pager when being run by a continuous integration system like Jenkins + pexpect.
            functional_terminal = False
            if self.stdin.isatty() and self.stdout.isatty():
                # On POSIX a usable terminal also needs TERM to be set.
                if sys.platform.startswith('win') or os.environ.get('TERM') is not None:
                    functional_terminal = True
            # Don't attempt to use a pager that can block if redirecting or running a script (either text or Python)
            # Also only attempt to use a pager if actually running in a real fully functional terminal
            if functional_terminal and not self.redirecting and not self._in_py and not self._script_dir:
                # Strip ANSI codes when colors are disabled.
                if self.colors.lower() == constants.COLORS_NEVER.lower():
                    msg_str = utils.strip_ansi(msg_str)
                pager = self.pager
                if chop:
                    pager = self.pager_chop
                # Prevent KeyboardInterrupts while in the pager. The pager application will
                # still receive the SIGINT since it is in the same process group as us.
                with self.sigint_protection:
                    pipe_proc = subprocess.Popen(pager, shell=True, stdin=subprocess.PIPE)
                    pipe_proc.communicate(msg_str.encode('utf-8', 'replace'))
            else:
                self.decolorized_write(self.stdout, msg_str)
        except BrokenPipeError:
            # This occurs if a command's output is being piped to another process and that process closes before the
            # command is finished. If you would like your application to print a warning message, then set the
            # broken_pipe_warning attribute to the message you want printed.
            if self.broken_pipe_warning:
                sys.stderr.write(self.broken_pipe_warning)
def aggregate(self, rankings, breaking="full", k=None):
    """Compute the Plackett-Luce model aggregate ranking for a set of rankings.

    Parameters:
        rankings: set of rankings to aggregate
        breaking: type of breaking to use ("full", "top", "bottom",
            "adjacent" or "position")
        k: number to be used for top, bottom, and position breakings

    Returns:
        numpy array of per-alternative scores (gamma), normalized to sum to 1.

    Raises:
        ValueError: if `k` is None for a breaking that requires it.
    """
    breakings = {"full": self._full, "top": self._top, "bottom": self._bot, "adjacent": self._adj, "position": self._pos}
    # BUG FIX (clarity): the original guard was the accidental chained
    # comparison `k == None and (breaking != "full" != breaking != "position")`,
    # which happens to evaluate to exactly this condition — state it directly.
    if k is None and breaking not in ("full", "position"):
        raise ValueError("k cannot be None for non-full or non-position breaking")
    break_mat = breakings[breaking](k)
    P = np.zeros((self.m, self.m))
    for ranking in rankings:
        # localP[i][j] == 1 iff alternative i is ranked above alternative j.
        localP = np.zeros((self.m, self.m))
        for ind1, alt1 in enumerate(self.alts):
            for ind2, alt2 in enumerate(self.alts):
                if ind1 == ind2:
                    continue
                alt1_rank = util.get_index_nested(ranking, alt1)
                alt2_rank = util.get_index_nested(ranking, alt2)
                if alt1_rank < alt2_rank:  # alt1 is ranked higher
                    localP[ind1][ind2] = 1
        # Diagonal entries are the negated off-diagonal column sums, so every
        # column of localP sums to zero.
        for ind, alt in enumerate(self.alts):
            localP[ind][ind] = -1 * (np.sum(localP.T[ind][:ind]) + np.sum(localP.T[ind][ind + 1:]))
        localP *= break_mat
        P += localP / len(rankings)
    # epsilon = 1e-7
    # assert(np.linalg.matrix_rank(P) == self.m - 1)
    # assert(all(np.sum(P, axis=0) <= epsilon))
    # gamma is the (elementwise absolute) null-space vector of P, normalized
    # to sum to 1.
    U, S, V = np.linalg.svd(P)
    gamma = np.abs(V[-1])
    gamma /= np.sum(gamma)
    # assert(all(np.dot(P, gamma) < epsilon))
    alt_scores = {cand: gamma[ind] for ind, cand in enumerate(self.alts)}
    self.P = P
    self.create_rank_dicts(alt_scores)
    return gamma
def inverse_transform(self, X, copy=None):
    """Scale back the data to the original representation.

    :param X: Scaled data matrix.
    :type X: numpy.ndarray, shape [n_samples, n_features]
    :param bool copy: Copy the X data matrix.
    :return: X data matrix with the scaling operation reverted.
    :rtype: numpy.ndarray, shape [n_samples, n_features]
    """
    check_is_fitted(self, 'scale_')
    # Fall back to the instance-level copy setting when not given.
    copy = copy if copy is not None else self.copy
    if sparse.issparse(X):
        # Centering a sparse matrix would densify it — refuse.
        if self.with_mean:
            raise ValueError("Cannot uncenter sparse matrices: pass `with_mean=False` " "instead See docstring for motivation and alternatives.")
        if not sparse.isspmatrix_csr(X):
            # Conversion already produces a new matrix, no further copy needed.
            X = X.tocsr()
            copy = False
        if copy:
            X = X.copy()
        if self.scale_ is not None:
            inplace_column_scale(X, self.scale_)
    else:
        X = numpy.asarray(X)
        if copy:
            X = X.copy()
        # Invert standardization: multiply back the std, then add the mean.
        if self.with_std:
            X *= self.scale_
        if self.with_mean:
            X += self.mean_
    return X
def _is_dst(dt):
    """Returns True if a given datetime object represents a time with
    DST shift."""
    # We can't use `dt.timestamp()` here since it requires a `utcoffset`
    # and we don't want to get into a recursive loop.
    timetuple = (
        dt.year, dt.month, dt.day,
        dt.hour, dt.minute, dt.second,
        dt.weekday(),
        0,    # day of the year (unused by mktime)
        -1,   # dst unknown — let mktime decide
    )
    return time.localtime(time.mktime(timetuple)).tm_isdst > 0
def prune_by_missing_percent(df, percentage=0.4):
    """Remove the attributes (genes) with more than a percentage of missing values.

    :param df: the dataframe containing the attributes to be pruned
    :param percentage: the percentage threshold (0.4 by default)
    :return: the pruned dataframe
    """
    # Fraction of missing values per column; keep columns strictly below
    # the threshold.
    missing_fraction = df.isnull().sum() / df.shape[0]
    keep = missing_fraction < percentage
    return df[df.columns[keep.values]]
def from_json(cls, data, json_schema_class=None):
    """Deserialize `data`, defaulting to this class's own JSON schema.

    Overrides the parent `from_json` so that, when called through this
    class and no schema is given, its own JSON schema is used.
    """
    if json_schema_class is None:
        schema = cls.json_schema
    else:
        schema = json_schema_class()
    return super(InfinityVertex, cls).from_json(data=data, json_schema_class=schema.__class__)
def __create(self, short_description, period, **kwargs):
    """Call documentation: `/preapproval/create
    <https://www.wepay.com/developer/reference/preapproval#create>`_, plus
    extra keyword parameters:

    :keyword str access_token: will be used instead of instance's
        ``access_token``, with ``batch_mode=True`` will set `authorization`
        param to it's value.
    :keyword bool batch_mode: turn on/off the batch_mode, see
        :class:`wepay.api.WePay`
    :keyword str batch_reference_id: `reference_id` param for batch call,
        see :class:`wepay.api.WePay`
    :keyword str api_version: WePay API version, see
        :class:`wepay.api.WePay`
    """
    call_params = {
        'short_description': short_description,
        'period': period,
    }
    return self.make_call(self.__create, call_params, kwargs)
def defaults(self):
    """Return default metadata."""
    return {
        'access_right': 'open',
        'description': self.description,
        'license': 'other-open',
        # published_at is an ISO timestamp; keep only the YYYY-MM-DD part.
        'publication_date': self.release['published_at'][:10],
        'related_identifiers': list(self.related_identifiers),
        'version': self.version,
        'title': self.title,
        'upload_type': 'software',
    }
def roots(self):
    """Utilises Boyd's O(n^2) recursive subdivision algorithm. The chebfun
    is recursively subsampled until it is successfully represented to
    machine precision by a sequence of piecewise interpolants of degree
    100 or less. A colleague matrix eigenvalue solve is then applied to
    each of these pieces and the results are concatenated.

    See:
        J. P. Boyd, Computing zeros on a real interval through Chebyshev
        expansion and polynomial rootfinding, SIAM J. Numer. Anal., 40
        (2002), pp. 1666-1682.
    """
    if self.size() == 1:
        # A constant has no (isolated) roots.
        return np.array([])
    elif self.size() <= 100:
        # Small enough: build the colleague matrix from the Chebyshev
        # coefficients and solve the eigenvalue problem directly.
        ak = self.coefficients()
        v = np.zeros_like(ak[:-1])
        v[1] = 0.5
        C1 = linalg.toeplitz(v)
        C2 = np.zeros_like(C1)
        C1[0, 1] = 1.
        C2[-1, :] = ak[:-1]
        C = C1 - .5 / ak[-1] * C2
        eigenvalues = linalg.eigvals(C)
        # Keep only (numerically) real eigenvalues inside [-1, 1].
        roots = [eig.real for eig in eigenvalues if np.allclose(eig.imag, 0, atol=1e-10) and np.abs(eig.real) <= 1]
        # Map from the unit interval back to the chebfun's domain.
        scaled_roots = self._ui_to_ab(np.array(roots))
        return scaled_roots
    else:
        # divide at a close-to-zero split-point (an irrational-looking
        # offset avoids splitting exactly on a root) and recurse.
        split_point = self._ui_to_ab(0.0123456789)
        return np.concatenate((self.restrict([self._domain[0], split_point]).roots(), self.restrict([split_point, self._domain[1]]).roots()))
def render_heading(self, token):
    """Overrides super().render_heading; stores rendered heading first,
    then returns it."""
    rendered = super().render_heading(token)
    content = self.parse_rendered_heading(rendered)
    # Record the heading unless it is filtered out.
    skip = (
        (self.omit_title and token.level == 1)
        or token.level > self.depth
        or any(cond(content) for cond in self.filter_conds)
    )
    if not skip:
        self._headings.append((token.level, content))
    return rendered
def dayofyear(self):
    """Day of the year index (the first of January = 0...).

    For reasons of consistency between leap years and non-leap years,
    assuming a daily time step, index 59 is always associated with the
    29th of February. Hence, it is missing in non-leap years:

    >>> from hydpy import pub
    >>> from hydpy.core.indextools import Indexer
    >>> pub.timegrids = '27.02.2004', '3.03.2004', '1d'
    >>> Indexer().dayofyear
    array([57, 58, 59, 60, 61])
    >>> pub.timegrids = '27.02.2005', '3.03.2005', '1d'
    >>> Indexer().dayofyear
    array([57, 58, 60, 61])
    """
    def _dayofyear(date):
        # Shift post-February days forward by one in non-leap years so
        # that index 59 is reserved for the 29th of February.
        offset = 1 if (date.month > 2) and (not date.leapyear) else 0
        return date.dayofyear - 1 + offset
    return _dayofyear
def _long2bytes(n, blocksize=0):
    """Convert a non-negative integer to a byte string.

    The integer is emitted as 32-bit words, most-significant word first,
    with each word packed little-endian ('<I'); leading zero bytes are then
    stripped (n == 0 yields a single zero byte).

    If optional blocksize is given and greater than zero, pad the front
    of the byte string with binary zeros so that the length is a multiple
    of blocksize.
    """
    # BUG FIX: the original used Python-2-only syntax (`<>` operator and the
    # `0xffffffffL` long suffix) and a text-string accumulator, all of which
    # fail on Python 3. The forms below are valid on both 2 and 3 and
    # produce identical byte sequences.
    s = b''
    pack = struct.pack
    while n > 0:  # ## CHANGED FROM '>I' TO '<I'. (DCG)
        s = pack('<I', n & 0xffffffff) + s
        n = n >> 32
    # Strip off leading zeros; an all-zero/empty buffer (n == 0) collapses
    # to a single zero byte.
    s = s.lstrip(b'\000') or b'\000'
    # Add back some pad bytes. This could be done more efficiently
    # w.r.t. the de-padding being done above, but sigh...
    if blocksize > 0 and len(s) % blocksize:
        s = (blocksize - len(s) % blocksize) * b'\000' + s
    return s
def render_markdown(text, context=None):
    """Turn markdown into HTML."""
    # Normalise context to a dict.
    # NOTE(review): `context` is validated but never passed on — confirm
    # whether the transform step is supposed to receive it.
    if not isinstance(context, dict):
        context = {}
    html = _transform_markdown_into_html(text)
    return mark_safe(_sanitise_markdown_html(html))
def _shrink(v, gamma):
    """Soft-shrinkage of an array with parameter gamma.

    Parameters
    ----------
    v : array
        Array containing the values to be applied to the shrinkage operator
    gamma : float
        Shrinkage parameter.

    Returns
    -------
    v : array
        The same input array after the shrinkage operator was applied
        (modified in place).
    """
    above = v > gamma
    below = v < -gamma
    # Pull values outside the [-gamma, gamma] band towards zero ...
    v[above] -= gamma
    v[below] += gamma
    # ... and zero out everything inside the band.
    v[~(above | below)] = .0
    return v
def create_document(self, doc: Dict, mime_type: str = None, url: str = "http://ex.com/123", doc_id=None, type_=None) -> Document:
    """Factory method to wrap input JSON docs in an ETK Document object.

    Args:
        doc (object): a JSON object containing a document in CDR format.
        mime_type (str): if doc is a string, the mime_type tells what it is.
        url (str): if the doc came from the web, specifies the URL for it.
        doc_id: identifier assigned to the wrapped document.
        type_: type applied to the wrapped document.

    Returns: wrapped Document
    """
    wrapped = Document(self, doc, mime_type, url, doc_id=doc_id)
    return wrapped.with_type(type_)
def check_rotation(rotation):
    """Check the rotation parameter; raise on an illegal value.

    Raises:
        UnsupportedRotation: if `rotation` is not in ALLOWED_ROTATION.
    """
    if rotation not in ALLOWED_ROTATION:
        allowed_rotation = ', '.join(ALLOWED_ROTATION)
        # BUG FIX: the user-facing message misspelled "allowed" as "allwoed".
        raise UnsupportedRotation('Rotation %s is not allowed. Allowed are %s' % (rotation, allowed_rotation))
def build_gtapp(appname, dry_run, **kwargs):
    """Build an object that can run ScienceTools application

    Parameters
    ----------
    appname : str
        Name of the application (e.g., gtbin)
    dry_run : bool
        Print command but do not run it
    kwargs : arguments used to invoke the application

    Returns `GtApp.GtApp` object that will run the application in question
    """
    # Temporarily swap PFILES while the application object is configured.
    saved_pfiles = _set_pfiles(dry_run, **kwargs)
    app = GtApp.GtApp(appname)
    update_gtapp(app, **kwargs)
    _reset_pfiles(saved_pfiles)
    return app
def bundle(self, ref, capture_exceptions=False):
    """Return a bundle built on a dataset, with the given vid or id reference."""
    from ..orm.exc import NotFoundError

    # A Dataset instance is used directly; otherwise resolve the reference.
    if isinstance(ref, Dataset):
        dataset = ref
    else:
        try:
            dataset = self._db.dataset(ref)
        except NotFoundError:
            dataset = None
    # Fall back to resolving the reference as a partition.
    if not dataset:
        try:
            dataset = self.partition(ref)._bundle.dataset
        except NotFoundError:
            dataset = None
    if not dataset:
        raise NotFoundError('Failed to find dataset for ref: {}'.format(ref))
    result = Bundle(dataset, self)
    result.capture_exceptions = capture_exceptions
    return result
def find_adsorption_sites(self, distance=2.0, put_inside=True, symm_reduce=1e-2, near_reduce=1e-2, positions=['ontop', 'bridge', 'hollow'], no_obtuse_hollow=True):
    """Finds surface sites according to the above algorithm. Returns
    a list of corresponding cartesian coordinates.

    Args:
        distance (float): distance from the coordinating ensemble
            of atoms along the miller index for the site (i.e.
            the distance from the slab itself)
        put_inside (bool): whether to put the site inside the cell
        symm_reduce (float): symm reduction threshold
        near_reduce (float): near reduction threshold
        positions (list): which positions to include in the site finding
            "ontop": sites on top of surface sites
            "bridge": sites at edges between surface sites in Delaunay
                triangulation of surface sites in the miller plane
            "hollow": sites at centers of Delaunay triangulation faces
            "subsurface": subsurface positions projected into miller plane
        no_obtuse_hollow (bool): flag to indicate whether to include
            obtuse triangular ensembles in hollow sites
    """
    ads_sites = {k: [] for k in positions}
    if 'ontop' in positions:
        ads_sites['ontop'] = [s.coords for s in self.surface_sites]
    if 'subsurface' in positions:
        # Get highest site
        ref = self.slab.sites[np.argmax(self.slab.cart_coords[:, 2])]
        # Project diff between highest site and subs site into miller
        ss_sites = [self.mvec * np.dot(ref.coords - s.coords, self.mvec) + s.coords for s in self.subsurface_sites()]
        ads_sites['subsurface'] = ss_sites
    if 'bridge' in positions or 'hollow' in positions:
        mesh = self.get_extended_surface_mesh()
        sop = get_rot(self.slab)
        # Delaunay-triangulate the surface mesh in the miller plane.
        dt = Delaunay([sop.operate(m.coords)[:2] for m in mesh])
        # TODO: refactor below to properly account for >3-fold
        for v in dt.simplices:
            if -1 not in v:
                dots = []
                for i_corner, i_opp in zip(range(3), ((1, 2), (0, 2), (0, 1))):
                    corner, opp = v[i_corner], [v[o] for o in i_opp]
                    # Unit vectors from the corner to the two opposite vertices.
                    vecs = [mesh[d].coords - mesh[corner].coords for d in opp]
                    vecs = [vec / np.linalg.norm(vec) for vec in vecs]
                    dots.append(np.dot(*vecs))
                    # Add bridge sites at midpoints of edges of D. Tri
                    if 'bridge' in positions:
                        ads_sites["bridge"].append(self.ensemble_center(mesh, opp))
                # Prevent addition of hollow sites in obtuse triangles
                # (a near-zero/negative dot product at any corner marks one).
                obtuse = no_obtuse_hollow and (np.array(dots) < 1e-5).any()
                # Add hollow sites at centers of D. Tri faces
                if 'hollow' in positions and not obtuse:
                    ads_sites['hollow'].append(self.ensemble_center(mesh, v))
    ads_sites['all'] = sum(ads_sites.values(), [])
    for key, sites in ads_sites.items():
        # Pare off outer sites for bridge/hollow
        if key in ['bridge', 'hollow']:
            frac_coords = [self.slab.lattice.get_fractional_coords(ads_site) for ads_site in sites]
            # Keep only sites from the central region of the extended mesh
            # (fractional coordinates between 1 and 4).
            frac_coords = [frac_coord for frac_coord in frac_coords if (frac_coord[0] > 1 and frac_coord[0] < 4 and frac_coord[1] > 1 and frac_coord[1] < 4)]
            sites = [self.slab.lattice.get_cartesian_coords(frac_coord) for frac_coord in frac_coords]
        if near_reduce:
            sites = self.near_reduce(sites, threshold=near_reduce)
        if put_inside:
            sites = [put_coord_inside(self.slab.lattice, coord) for coord in sites]
        if symm_reduce:
            sites = self.symm_reduce(sites, threshold=symm_reduce)
        # Offset each site away from the slab along the miller direction.
        sites = [site + distance * self.mvec for site in sites]
        ads_sites[key] = sites
    return ads_sites
def escape_dictionary(dictionary, datetime_format='%Y-%m-%d %H:%M:%S'):
    """Escape dictionary values in place, with keys as column names and
    values as column values.

    @type dictionary: dict
    @param dictionary: Key-values; mutated in place, nothing is returned.
    @type datetime_format: str
    @param datetime_format: strftime format applied to datetime values.
    """
    # NOTE: Python 2 only (`iteritems`, `basestring`). Reassigning values
    # for existing keys while iterating is safe since the key set does not
    # change.
    for k, v in dictionary.iteritems():
        # Render datetimes as formatted strings first so they get quoted below.
        if isinstance(v, datetime.datetime):
            v = v.strftime(datetime_format)
        # Escape and double-quote any string value.
        if isinstance(v, basestring):
            v = CoyoteDb.db_escape(str(v))
            v = '"{}"'.format(v)
        # Booleans become integer literals; `is` is used so the ints 0/1
        # themselves pass through unchanged.
        if v is True:
            v = 1
        if v is False:
            v = 0
        if v is None:
            v = 'NULL'
        dictionary[k] = v
def set_position(self, pos, which='both'):
    """Identical to Axes.set_position (This docstring is overwritten)."""
    # Keep the underlying polar axes (and any overlay axes) in sync
    # before delegating to the base implementation.
    self._polar.set_position(pos, which)
    overlay = self._overlay_axes
    if overlay is not None:
        overlay.set_position(pos, which)
    LambertAxes.set_position(self, pos, which)
def add_link_headers(response, links):
    """Return *response* with the proper link headers set, based on the contents
    of *links*.

    :param response: :class:`flask.Response` response object for links to be
        added
    :param dict links: Dictionary of links to be added; must contain a
        'self' entry, which is emitted with rel=self
    :rtype: class:`flask.Response`:
    """
    link_string = '<{}>; rel=self'.format(links['self'])
    for rel, link in links.items():
        # BUG FIX: skip the 'self' entry here — the original iterated over
        # all values and so emitted the self link a second time as
        # rel=related.
        if rel == 'self':
            continue
        link_string += ', <{}>; rel=related'.format(link)
    response.headers['Link'] = link_string
    return response
def apply_security_groups(name, security_groups, region=None, key=None, keyid=None, profile=None):
    '''
    Apply security groups to ELB.

    CLI example:

    .. code-block:: bash

        salt myminion boto_elb.apply_security_groups myelb '["mysecgroup1"]'

    Returns True on success, False (with the error logged) on failure.
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # Accept a JSON-encoded list (as passed from the CLI) as well as a list.
    if isinstance(security_groups, six.string_types):
        security_groups = salt.utils.json.loads(security_groups)
    try:
        conn.apply_security_groups_to_lb(name, security_groups)
        log.info('Applied security_groups on ELB %s', name)
        return True
    except boto.exception.BotoServerError as e:
        log.debug(e)
        # BUG FIX: log message misspelled "apply" as "appply".
        log.error('Failed to apply security_groups on ELB %s: %s', name, e.message)
        return False
def remove(self):
    """Interactively remove this package's repository directory.

    Prompts the user for confirmation and deletes the directory tree when
    the answer is 'y' or 'yes'.
    """
    logger.debug("")
    pkg_path = self.repo_dir
    logger.debug("pkg path %s", pkg_path)
    if not pkg_path:
        print("unable to find pkg '%s'. %s" % (self.name, did_u_mean(self.name)))
        return
    # Does the repo have any uncommitted changes?
    # Is the repo out of sync (needs a push?)
    # Are you sure?
    prompt = self.term.red("Are you sure you want to remove the '%s' pkg? [y|N] " % self.name)
    answer = input(prompt)
    if answer in ('y', 'yes'):
        self.pr_atten('removing {}...', self.name)
        shutil.rmtree(pkg_path)
def __learn_labels(self, labels):
    """Learns new labels; this method is intended for internal use.

    Args:
        labels (:obj:`list` of :obj:`str`): Labels to learn
    """
    # Start from the already-known classes once any features exist,
    # otherwise start from scratch.
    if self.feature_length > 0:
        known = list(self.labels.classes_)
    else:
        known = []
    known.extend(labels)
    self.labels.fit(known)
def upload_process_reach_files(output_dir, pmid_info_dict, reader_version, num_cores):
    """Upload REACH JSON reading results to S3 and process them into statements.

    Args:
        output_dir: Directory containing the per-paper REACH ``*.json`` files.
        pmid_info_dict: Mapping of PMID prefix to info dicts (the
            'content_source' entry is used for the upload).
        reader_version: REACH reader version string recorded with the upload.
        num_cores: Number of worker processes used for local JSON processing.

    Returns:
        dict mapping PMID to the list of statements extracted for it.
    """
    # At this point, we have a directory full of JSON files
    # Collect all the prefixes into a set, then iterate over the prefixes
    # Collect prefixes
    json_files = glob.glob(os.path.join(output_dir, '*.json'))
    json_prefixes = set([])
    for json_file in json_files:
        # Prefix is the filename up to the first dot (presumably the PMID —
        # TODO confirm against the file-naming convention).
        filename = os.path.basename(json_file)
        prefix = filename.split('.')[0]
        json_prefixes.add(prefix)
    # Make a list with PMID and source_text info
    logger.info("Uploading reading results for reach.")
    pmid_json_tuples = []
    for json_prefix in json_prefixes:
        try:
            full_json = upload_reach_readings(json_prefix, pmid_info_dict[json_prefix].get('content_source'), reader_version, output_dir)
            pmid_json_tuples.append((json_prefix, full_json))
        except Exception as e:
            # A failed upload is logged and skipped; processing continues
            # with the remaining papers.
            logger.error("Caught an exception while trying to upload reach " "reading results onto s3 for %s." % json_prefix)
            logger.exception(e)
    # Create a multiprocessing pool
    logger.info('Creating a multiprocessing pool with %d cores' % num_cores)
    # Get a multiprocessing pool.
    pool = mp.Pool(num_cores)
    logger.info('Processing local REACH JSON files')
    res = pool.map(upload_process_pmid, pmid_json_tuples)
    # Flatten the per-worker result dicts into a single PMID -> statements map.
    stmts_by_pmid = {pmid: stmts for res_dict in res for pmid, stmts in res_dict.items()}
    pool.close()
    logger.info('Multiprocessing pool closed.')
    pool.join()
    logger.info('Multiprocessing pool joined.')
    # The following block is deliberately disabled (kept as a string literal).
    """logger.info('Uploaded REACH JSON for %d files to S3 (%d failures)' %
        (num_uploaded, num_failures))
    failures_file = os.path.join(output_dir, 'failures.txt')
    with open(failures_file, 'wt') as f:
        for fail in failures:
            f.write('%s\n' % fail)"""
    return stmts_by_pmid
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.