signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
async def resolve(self, client):
    """Resolve this event builder against *client* exactly once.

    Uses double-checked locking so that concurrent callers do not run
    ``_resolve`` twice: the ``resolved`` flag is tested before and after
    acquiring the lock, and only the first caller does the actual work.

    :param client: the client instance the builder is resolved against.
    """
    if self.resolved:
        return
    if not self._resolve_lock:
        # NOTE: ``asyncio.Lock(loop=...)`` was deprecated in Python 3.8 and
        # removed in 3.10; the lock binds to the running loop automatically.
        self._resolve_lock = asyncio.Lock()
    async with self._resolve_lock:
        # Re-check under the lock: another task may have resolved us while
        # we were waiting to acquire it.
        if not self.resolved:
            await self._resolve(client)
            self.resolved = True
def AddShapePointObjectUnsorted(self, shapepoint, problems):
    """Insert a point into a correct position by sequence.

    Keeps ``self.sequence``, ``self.distance`` and ``self.points`` sorted by
    ``shape_pt_sequence``, reporting validation problems (duplicate sequence
    numbers, non-monotonic ``shape_dist_traveled``) via *problems*.

    :param shapepoint: object with ``shape_pt_sequence``, ``shape_pt_lat``,
        ``shape_pt_lon`` and ``shape_dist_traveled`` attributes.
    :param problems: problem reporter with an ``InvalidValue`` method.
    """
    # Fast paths: append at the end or prepend; otherwise bisect for the
    # insertion point in the sorted sequence list.
    if (len(self.sequence) == 0 or
            shapepoint.shape_pt_sequence >= self.sequence[-1]):
        index = len(self.sequence)
    elif shapepoint.shape_pt_sequence <= self.sequence[0]:
        index = 0
    else:
        index = bisect.bisect(self.sequence, shapepoint.shape_pt_sequence)
    # Duplicate sequence numbers are reported but the point is still inserted.
    if shapepoint.shape_pt_sequence in self.sequence:
        problems.InvalidValue('shape_pt_sequence', shapepoint.shape_pt_sequence,
                              'The sequence number %d occurs more than once in '
                              'shape %s.' %
                              (shapepoint.shape_pt_sequence, self.shape_id))
    if shapepoint.shape_dist_traveled is not None and len(self.sequence) > 0:
        # Distance must not exceed the next point's distance...
        if (index != len(self.sequence) and
                shapepoint.shape_dist_traveled > self.distance[index]):
            problems.InvalidValue('shape_dist_traveled',
                                  shapepoint.shape_dist_traveled,
                                  'Each subsequent point in a shape should have '
                                  'a distance value that shouldn\'t be larger '
                                  'than the next ones. In this case, the next '
                                  'distance was %f.' % self.distance[index])
        # ...and must be at least the previous point's distance.
        if (index > 0 and
                shapepoint.shape_dist_traveled < self.distance[index - 1]):
            problems.InvalidValue('shape_dist_traveled',
                                  shapepoint.shape_dist_traveled,
                                  'Each subsequent point in a shape should have '
                                  'a distance value that\'s at least as large as '
                                  'the previous ones. In this case, the previous '
                                  'distance was %f.' % self.distance[index - 1])
    if shapepoint.shape_dist_traveled > self.max_distance:
        self.max_distance = shapepoint.shape_dist_traveled
    # Insert into all three parallel lists at the same index.
    self.sequence.insert(index, shapepoint.shape_pt_sequence)
    self.distance.insert(index, shapepoint.shape_dist_traveled)
    self.points.insert(index, (shapepoint.shape_pt_lat, shapepoint.shape_pt_lon,
                               shapepoint.shape_dist_traveled))
def from_dict(cls, data, read_only=False):
    """Recreate a feature collection from a dictionary.

    The dictionary is of the format dumped by :meth:`to_dict`.
    Additional information, such as whether the feature collection
    should be read-only, is not included in this dictionary, and
    is instead passed as parameters to this function.
    """
    collection = cls(read_only=read_only)
    # Start from an empty feature map, then replay the serialized state.
    collection._features = {}
    collection._from_dict_update(data)
    return collection
def find(self, requirement, meta_extras=None, prereleases=False):
    """Find a distribution and all distributions it depends on.

    :param requirement: The requirement specifying the distribution to
                        find, or a Distribution instance.
    :param meta_extras: A list of meta extras such as :test:, :build: and
                        so on.
    :param prereleases: If ``True``, allow pre-release versions to be
                        returned - otherwise, don't return prereleases
                        unless they're all that's available.

    Return a set of :class:`Distribution` instances and a set of
    problems.

    The distributions returned should be such that they have the
    :attr:`required` attribute set to ``True`` if they were
    from the ``requirement`` passed to ``find()``, and they have the
    :attr:`build_time_dependency` attribute set to ``True`` unless they
    are post-installation dependencies of the ``requirement``.

    The problems should be a tuple consisting of the string
    ``'unsatisfied'`` and the requirement which couldn't be satisfied
    by any distribution known to the locator.
    """
    # Reset per-call resolution state on the finder.
    self.provided = {}
    self.dists = {}
    self.dists_by_name = {}
    self.reqts = {}
    meta_extras = set(meta_extras or [])
    if ':*:' in meta_extras:
        meta_extras.remove(':*:')
        # :meta: and :run: are implicitly included
        meta_extras |= set([':test:', ':build:', ':dev:'])
    # The root may be a ready-made Distribution or a requirement string
    # that still needs locating.
    if isinstance(requirement, Distribution):
        dist = odist = requirement
        logger.debug('passed %s as requirement', odist)
    else:
        dist = odist = self.locator.locate(requirement,
                                           prereleases=prereleases)
        if dist is None:
            raise DistlibException('Unable to locate %r' % requirement)
        logger.debug('located %s', odist)
    dist.requested = True
    problems = set()
    # Breadth-style worklist of distributions whose requirements still
    # need to be expanded.
    todo = set([dist])
    install_dists = set([odist])
    while todo:
        dist = todo.pop()
        name = dist.key  # case-insensitive
        if name not in self.dists_by_name:
            self.add_distribution(dist)
        else:
            # import pdb; pdb.set_trace()
            other = self.dists_by_name[name]
            if other != dist:
                self.try_to_replace(dist, other, problems)
        # Install-time vs build-time requirement sets.
        ireqts = dist.run_requires | dist.meta_requires
        sreqts = dist.build_requires
        ereqts = set()
        # Extras (:test:, :build:, :dev:) only apply to distributions
        # that will actually be installed.
        if meta_extras and dist in install_dists:
            for key in ('test', 'build', 'dev'):
                e = ':%s:' % key
                if e in meta_extras:
                    ereqts |= getattr(dist, '%s_requires' % key)
        all_reqts = ireqts | sreqts | ereqts
        for r in all_reqts:
            providers = self.find_providers(r)
            if not providers:
                logger.debug('No providers found for %r', r)
                provider = self.locator.locate(r, prereleases=prereleases)
                # If no provider is found and we didn't consider
                # prereleases, consider them now.
                if provider is None and not prereleases:
                    provider = self.locator.locate(r, prereleases=True)
                if provider is None:
                    logger.debug('Cannot satisfy %r', r)
                    problems.add(('unsatisfied', r))
                else:
                    n, v = provider.key, provider.version
                    if (n, v) not in self.dists:
                        todo.add(provider)
                    providers.add(provider)
                    if r in ireqts and dist in install_dists:
                        install_dists.add(provider)
                        logger.debug('Adding %s to install_dists',
                                     provider.name_and_version)
            for p in providers:
                name = p.key
                if name not in self.dists_by_name:
                    self.reqts.setdefault(p, set()).add(r)
                else:
                    other = self.dists_by_name[name]
                    if other != p:
                        # see if other can be replaced by p
                        self.try_to_replace(p, other, problems)
    # Anything not reachable through install-time requirements is a
    # build-time-only dependency.
    dists = set(self.dists.values())
    for dist in dists:
        dist.build_time_dependency = dist not in install_dists
        if dist.build_time_dependency:
            logger.debug('%s is a build-time dependency only.',
                         dist.name_and_version)
    logger.debug('find done for %s', odist)
    return dists, problems
def check_hmc_diagnostics(fit, pars=None, verbose=True, per_chain=False, checks=None):
    """Run all HMC diagnostic checks on a fitted model.

    Parameters
    ----------
    fit : StanFit4Model object
    pars : optional
        Parameter names to restrict the n_eff / Rhat checks to.
    verbose : bool or int, optional
        If falsy or a nonpositive integer, no diagnostic messages are
        printed and only the return value conveys diagnostic information.
        If ``True`` (default) or a positive integer, messages are printed
        for failing checks; larger integers print progressively more detail.
    per_chain : bool, optional
        Where applicable, print diagnostics on a per-chain basis (mainly
        the divergence and treedepth checks).
    checks : list, {"n_eff", "Rhat", "divergence", "treedepth", "energy"}, optional
        By default run all checks; otherwise run only the checks listed.
        Lowercase ``"rhat"`` is accepted as an alias for ``"Rhat"``.

    Returns
    -------
    out_dict : dict
        Maps each executed check name to a boolean: True if the check
        passed, False otherwise. Checks whose inputs are unavailable for
        the sampler used are skipped (with a warning when verbose).
    """
    # For consistency with the individual diagnostic functions
    verbosity = int(verbose)
    all_checks = {"n_eff", "Rhat", "divergence", "treedepth", "energy"}
    if checks is None:
        checks = all_checks
    else:
        # Reject anything that is neither a known check nor the lowercase
        # "rhat" alias.
        undefined_checks = [c for c in checks
                            if c != "rhat" and c not in all_checks]
        if undefined_checks:
            ucstr = "[" + ", ".join(undefined_checks) + "]"
            msg = "checks: {} are not legal checks: {}".format(ucstr, all_checks)
            raise TypeError(msg)
    out_dict = {}
    # (result key, requested?, runner, warning on unavailable data)
    plan = [
        ("n_eff",
         "n_eff" in checks,
         lambda: check_n_eff(fit, pars, verbose),
         'Skipping check of effective sample size (n_eff)'),
        ("Rhat",
         ("Rhat" in checks) or ("rhat" in checks),
         lambda: check_rhat(fit, pars, verbose),
         'Skipping check of potential scale reduction factors (Rhat)'),
        ("divergence",
         "divergence" in checks,
         lambda: check_div(fit, verbose, per_chain),
         'Skipping check of divergent transitions (divergence)'),
        ("treedepth",
         "treedepth" in checks,
         lambda: check_treedepth(fit, verbose, per_chain),
         'Skipping check of transitions ending prematurely due to maximum tree depth limit (treedepth)'),
        ("energy",
         "energy" in checks,
         lambda: check_energy(fit, verbose),
         'Skipping check of E-BFMI (energy)'),
    ]
    for key, requested, runner, warning in plan:
        if not requested:
            continue
        try:
            out_dict[key] = runner()
        except ValueError:
            # The fit does not expose the data this check needs.
            if verbosity > 0:
                logger.warning(warning)
    return out_dict
def get_assessment_part_admin_session_for_bank(self, bank_id, proxy):
    """Gets the ``OsidSession`` associated with the assessment part
    administration service for the given bank.

    arg:    bank_id (osid.id.Id): the ``Id`` of the ``Bank``
    arg:    proxy (osid.proxy.Proxy): a proxy
    return: (osid.assessment.authoring.AssessmentPartAdminSession) -
            an ``AssessmentPartAdminSession``
    raise:  NotFound - no ``Bank`` found by the given ``Id``
    raise:  NullArgument - ``bank_id or proxy is null``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_assessment_part_admin()`` or
            ``supports_visible_federation()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_assessment_part_admin()`` and
    ``supports_visible_federation()`` are ``true``.*
    """
    if not self.supports_assessment_part_admin():
        raise errors.Unimplemented()
    # Also include check to see if the catalog Id is found otherwise raise errors.NotFound
    # pylint: disable=no-member
    return sessions.AssessmentPartAdminSession(bank_id, proxy, self._runtime)
def structure(self, obj, cl):
    # type: (Any, Type[T]) -> T
    """Convert unstructured Python data structures to structured data.

    Looks up the structuring hook registered for *cl* and applies it
    to *obj*.
    """
    hook = self._structure_func.dispatch(cl)
    return hook(obj, cl)
def _replace_variable_with_pattern(match):
    """Replace a variable match with a pattern that can be used to validate it.

    Args:
        match (re.Match): A regular expression match

    Returns:
        str: A regular expression pattern that can be used to validate the
            variable in an expanded path.

    Raises:
        ValueError: If an unexpected template expression is encountered.
    """
    positional = match.group("positional")
    name = match.group("name")
    template = match.group("template")
    # Named variables: the template (if any) decides the pattern shape.
    if name is not None:
        if template == "**":
            return _MULTI_SEGMENT_PATTERN.format(name)
        if template:
            return _generate_pattern_for_template(template)
        return _SINGLE_SEGMENT_PATTERN.format(name)
    # Positional wildcards.
    if positional == "*":
        return _SINGLE_SEGMENT_PATTERN
    if positional == "**":
        return _MULTI_SEGMENT_PATTERN
    raise ValueError("Unknown template expression {}".format(match.group(0)))
def _make_signed_headers ( headers , headers_to_sign ) :
"""Return a semicolon - delimited list of headers to sign .
@ param headers : The request headers .
@ type headers : L { dict }
@ param headers _ to _ sign : A sequence of header names that should be
signed .
@ type headers _ to _ sign : L { bytes }
@ return : The semicolon - delimited list of headers .
@ rtype : L { bytes }""" | return b";" . join ( header . lower ( ) for header in sorted ( headers_to_sign ) if header in headers ) |
def configure(self, config):
    """Configure the amount of characters to skip.

    Reads the ``skip<suffix>`` option from *config*, keeping the current
    prefix length as the fallback when the option is absent.
    """
    option_name = 'skip{suffix}'.format(suffix=self._option_suffix)
    self._prefix_length = config.getint(option_name,
                                        fallback=self._prefix_length)
def strip_required_suffix(string, suffix):
    """Strip *suffix* from the end of *string*, asserting it is present.

    >>> strip_required_suffix('abcdef', 'def')
    'abc'
    >>> strip_required_suffix('abcdef', '123')
    Traceback (most recent call last):
    AssertionError: String ends with 'def', not '123'
    """
    if string.endswith(suffix):
        # Slice by explicit length so an empty suffix returns the whole
        # string; the previous ``string[:-len(suffix)]`` evaluated to
        # ``string[:-0]`` == '' when the suffix was empty.
        return string[:len(string) - len(suffix)]
    raise AssertionError('String ends with %r, not %r'
                         % (string[-len(suffix):], suffix))
def _set_pw_profile(self, v, load=False):
    """Setter method for pw_profile, mapped from YANG variable /pw_profile (list).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_pw_profile is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_pw_profile() directly.
    """
    # Unwrap values that still carry their original pyangbind type wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the incoming value into the generated YANG list type; any
        # incompatible value raises below with a descriptive payload.
        t = YANGDynClass(v, base=YANGListType("pw_profile_name", pw_profile.pw_profile, yang_name="pw-profile", rest_name="pw-profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pw-profile-name', extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}), is_container='list', yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}, namespace='urn:brocade.com:mgmt:brocade-pw-profile', defining_module='brocade-pw-profile', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """pw_profile must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("pw_profile_name",pw_profile.pw_profile, yang_name="pw-profile", rest_name="pw-profile", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='pw-profile-name', extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}), is_container='list', yang_name="pw-profile", rest_name="pw-profile", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'pw-profile for Node Specific configuration', u'callpoint': u'PWProfileBasicCallpoint', u'cli-mode-name': u'config-pw-profile-$(pw-profile-name)'}}, namespace='urn:brocade.com:mgmt:brocade-pw-profile', defining_module='brocade-pw-profile', yang_type='list', is_config=True)""", })
    self.__pw_profile = t
    # Notify the parent tree (if wired up) that this node changed.
    if hasattr(self, '_set'):
        self._set()
def markdown_to_text(self, source):
    """Escape the given source, for a markdown cell"""
    # The default "#'" comment style needs no magic-escaping; for any other
    # comment prefix, work on a copy so the caller's list is untouched
    # (comment_magic mutates its argument in place).
    if self.comment and self.comment != "#'":
        source = copy(source)
        comment_magic(source, self.language, self.comment_magics)
    # Prefix every line with the configured comment marker.
    return comment_lines(source, self.comment)
def _set_brief(self, v, load=False):
    """Setter method for brief, mapped from YANG variable /ptp_state/brief (container).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_brief is considered as a private method. Backends looking
    to populate this variable should do so via calling
    thisObj._set_brief() directly.
    """
    # Unwrap values that still carry their original pyangbind type wrapper.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce the incoming value into the generated YANG container type.
        t = YANGDynClass(v, base=brief.brief, is_container='container', presence=False, yang_name="brief", rest_name="brief", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ptp-brief', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ptp-operational', defining_module='brocade-ptp-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        raise ValueError({'error-string': """brief must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=brief.brief, is_container='container', presence=False, yang_name="brief", rest_name="brief", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'ptp-brief', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-ptp-operational', defining_module='brocade-ptp-operational', yang_type='container', is_config=False)""", })
    self.__brief = t
    # Notify the parent tree (if wired up) that this node changed.
    if hasattr(self, '_set'):
        self._set()
def append(self, path, data, **kwargs):
    """Append to the given file.

    :param data: ``bytes`` or a ``file``-like object
    :param buffersize: The size of the buffer used in transferring data.
    :type buffersize: int
    """
    # WebHDFS two-step write: the metadata request returns a redirect to
    # the datanode that actually receives the payload.
    metadata_response = self._post(path, 'APPEND',
                                   expected_status=httplib.TEMPORARY_REDIRECT,
                                   **kwargs)
    data_response = self._requests_session.post(
        metadata_response.headers['location'], data=data,
        **self._requests_kwargs)
    _check_response(data_response)
    # A successful append returns an empty response body.
    assert not data_response.content
def create_project_config_path(path, mode=0o777, parents=False, exist_ok=False):
    """Create new project configuration folder.

    Creates the ``RENKU_HOME`` directory under *path* and returns its
    absolute path as a string; *mode*, *parents* and *exist_ok* are passed
    straight through to :meth:`pathlib.Path.mkdir`.
    """
    # FIXME check default directory mode
    project_path = Path(path).absolute() / RENKU_HOME
    project_path.mkdir(mode=mode, parents=parents, exist_ok=exist_ok)
    return str(project_path)
def pretty_print_match(match, parameterized=True):
    """Return a human-readable representation of a parameterized MATCH query string."""
    # Parameterized queries use doubled braces ({{ }}) so str.format leaves
    # them alone; plain queries use single braces.
    left_curly = '{{' if parameterized else '{'
    right_curly = '}}' if parameterized else '}'
    match = remove_custom_formatting(match)
    # Split the query into brace tokens and the text between them, keeping
    # the delimiters in the resulting list.
    parts = re.split('({}|{})'.format(left_curly, right_curly), match)
    inside_braces = False
    indent_size = 4
    indent = ' ' * indent_size
    output = [parts[0]]
    for current_index, current_part in enumerate(parts[1:]):
        if current_part == left_curly:
            if inside_braces:
                raise AssertionError(u'Found open-braces pair while already inside braces: '
                                     u'{} {} {}'.format(current_index, parts, match))
            inside_braces = True
            output.append(current_part + '\n')
        elif current_part == right_curly:
            if not inside_braces:
                raise AssertionError(u'Found close-braces pair while not inside braces: '
                                     u'{} {} {}'.format(current_index, parts, match))
            inside_braces = False
            output.append(current_part)
        else:
            if not inside_braces:
                stripped_part = current_part.lstrip()
                if stripped_part.startswith('.'):
                    # Strip whitespace before traversal steps.
                    output.append(stripped_part)
                else:
                    # Do not strip whitespace before e.g. the RETURN keyword.
                    output.append(current_part)
            else:
                # Split out the keywords, initially getting rid of commas.
                separate_keywords = re.split(', ([a-z]+:)', current_part)
                # The first item in the separated list is the full first "keyword: value" pair.
                # For every subsequent item, the keyword and value are separated; join them
                # back together, outputting the comma, newline and indentation before them.
                output.append(indent + separate_keywords[0].lstrip())
                for i in six.moves.xrange(1, len(separate_keywords) - 1, 2):
                    output.append(',\n{indent}{keyword} {value}'.format(
                        keyword=separate_keywords[i].strip(),
                        value=separate_keywords[i + 1].strip(),
                        indent=indent))
                output.append('\n')
    return ''.join(output).strip()
def normalize_locale(locale):
    """Normalize locale.

    Extracts the language code from the passed-in locale string to be used
    later for dictionaries loading.

    :param locale: string, locale (en, en_US)
    :return: string, language code, or None when no leading letters exist
    """
    import re
    found = re.match(r'^[a-z]+', locale.lower())
    return found.group() if found else None
def viewvalues(obj, **kwargs):
    """Function for iterating over dictionary values with the same set-like
    behaviour on Py2.7 as on Py3.

    Passes kwargs to the underlying method.
    """
    # Prefer the Py2 view method when it exists; otherwise fall back to
    # .values(), which already returns a view on Py3.
    method = getattr(obj, "viewvalues", None) or obj.values
    return method(**kwargs)
def main():
    """Sanitizes the loaded *.ipynb.

    Reads the notebook named by ``sys.argv[1]``, strips the kernelspec,
    clears code-cell outputs/metadata and execution counts, then writes
    the result back in place.
    """
    target = sys.argv[1]
    with open(target, 'r') as nbfile:
        notebook = json.load(nbfile)
    # remove kernelspec (venvs)
    notebook.get('metadata', {}).pop('kernelspec', None)
    # remove outputs and metadata, set execution counts to None
    for cell in notebook['cells']:
        if cell.get('cell_type') == 'code':
            cell['outputs'] = []
            cell['execution_count'] = None
            cell['metadata'] = {}
    with open(target, 'w') as nbfile:
        json.dump(notebook, nbfile, indent=1)
def get_adjacency_matrix(df_connected):
    '''Return matrix where $a_{i,j} = 1$ indicates polygon $i$ is connected to
    polygon $j$.

    Also, return mapping (and reverse mapping) from original keys in
    `df_connected` to zero-based integer index used for matrix rows and
    columns.
    '''
    # Collect the distinct node keys in sorted order and build both
    # directions of the key <-> integer-index mapping.
    keys = np.sort(np.unique(df_connected[['source', 'target']].values.ravel()))
    indexed_paths = pd.Series(keys)
    path_indexes = pd.Series(indexed_paths.index, index=keys)
    size = path_indexes.shape[0]
    adjacency_matrix = np.zeros((size, size), dtype=int)
    # Mark each edge symmetrically (undirected connectivity).
    for src_key, dst_key in df_connected[['source', 'target']].values:
        i, j = path_indexes.loc[[src_key, dst_key]]
        adjacency_matrix[i, j] = adjacency_matrix[j, i] = 1
    return adjacency_matrix, indexed_paths, path_indexes
def parse_callable ( path : str ) -> Iterator :
"""ConfigParser converter .
Calls the specified object , e . g . Option " id _ generators . decimal " returns
` id _ generators . decimal ( ) ` .""" | module = path [ : path . rindex ( "." ) ]
callable_name = path [ path . rindex ( "." ) + 1 : ]
callable_ = getattr ( importlib . import_module ( module ) , callable_name )
return callable_ ( ) |
def getReflexRuleSetup(self):
    """Return a JSON dict with all the setup data necessary to build the
    relations between methods and analysis-service options, plus the
    currently saved rule data.

    The result maps each method UID to::

        {'method_id': <method_id>,
         'method_tile': <method_title>,
         'analysisservices': {
             <as_uid>: {'as_id': <as_id>,
                        'as_title': <as_title>,
                        'resultoptions': [...],
                        'wstoptions': [(<wst_uid>, <wst_title>), ...]}},
         'as_keys': [<as_uid>, ...]}

    and additionally contains a ``'saved_actions'`` entry with the method
    uid/id/title saved on the parent ReflexRule and its rule list.
    """
    relations = {}
    # Getting all the methods from the system
    pc = getToolByName(self, 'portal_catalog')
    methods = [obj.getObject() for obj in pc(portal_type='Method',
                                             is_active=True)]
    bsc = getToolByName(self, 'bika_setup_catalog')
    for method in methods:
        # Get the analysis services related to each method
        an_servs_brains = bsc(portal_type='AnalysisService',
                              getMethodUIDs={"query": method.UID(),
                                             "operator": "or"})
        analysiservices = {}
        for analysiservice in an_servs_brains:
            analysiservice = analysiservice.getObject()
            # Getting the worksheet templates that could be used with the
            # analysis, those worksheet templates are the ones without
            # method and the ones with a method shared with the
            # analysis service.
            service_methods_uid = analysiservice.getAvailableMethodUIDs()
            query_dict = {'portal_type': 'WorksheetTemplate',
                          'is_active': True,
                          'sort_on': 'sortable_title',
                          'getMethodUID': {"query": service_methods_uid + [''],
                                           "operator": "or"}}
            wst_brains = bsc(query_dict)
            analysiservices[analysiservice.UID()] = {
                'as_id': analysiservice.getId(),
                'as_title': analysiservice.Title(),
                'resultoptions': analysiservice.getResultOptions()
                if analysiservice.getResultOptions() else [],
                'wstoptions': [(brain.UID, brain.Title)
                               for brain in wst_brains]}
        # Make the json dict
        relations[method.UID()] = {'method_id': method.getId(),
                                   'method_tile': method.Title(),
                                   'analysisservices': analysiservices,
                                   'as_keys': analysiservices.keys(), }
    # Get the data saved in the object
    reflex_rule = self.aq_parent.aq_inner
    saved_method = reflex_rule.getMethod()
    relations['saved_actions'] = {
        'method_uid': saved_method.UID() if saved_method else '',
        'method_id': saved_method.getId() if saved_method else '',
        'method_tile': saved_method.Title() if saved_method else '',
        'rules': reflex_rule.getReflexRules(), }
    return json.dumps(relations)
def _parse(self, roi=None, filenames=None):
    """Parse catalog FITS files into recarray.

    Parameters:
        roi : The region of interest; if 'roi=None', read all catalog files
        filenames : Explicit list of catalog files to read; mutually
            exclusive with ``roi``.
    Returns:
        None
    """
    # roi and filenames are mutually exclusive ways to select inputs.
    if (roi is not None) and (filenames is not None):
        msg = "Cannot take both roi and filenames"
        raise Exception(msg)
    if roi is not None:
        # Restrict to the catalog files covering the ROI's pixels.
        pixels = roi.getCatalogPixels()
        filenames = self.config.getFilenames()['catalog'][pixels]
    elif filenames is None:
        # Default: every catalog file in the configuration.
        filenames = self.config.getFilenames()['catalog'].compressed()
    else:
        filenames = np.atleast_1d(filenames)
    if len(filenames) == 0:
        msg = "No catalog files found."
        raise Exception(msg)
    # Load the data
    self.data = load_infiles(filenames)
    # Apply a selection cut
    self._applySelection()
    # Cast data to recarray (historical reasons)
    self.data = self.data.view(np.recarray)
def _cfg(key, default=None):
    '''Return the requested value from the aws_kms key in salt configuration.

    If it's not set, return the default.
    '''
    # Prefer the full config.get execution module; fall back to plain
    # __opts__ lookup when it is unavailable.
    getter = __salt__.get('config.get', __opts__.get)
    aws_kms_section = getter('aws_kms', {})
    return aws_kms_section.get(key, default)
def get_request_body(self):
    """Decodes the request body and returns it.

    :return: the decoded request body as a :class:`dict` instance.
    :raises: :class:`tornado.web.HTTPError` if the body cannot be
        decoded (415) or if decoding fails (400)
    """
    # Decode lazily and cache the result on the handler instance.
    if self._request_body is None:
        content_type_str = self.request.headers.get(
            'Content-Type', 'application/octet-stream')
        LOGGER.debug('decoding request body of type %s', content_type_str)
        content_type = headers.parse_content_type(content_type_str)
        try:
            # Negotiate the declared content type against the supported set.
            selected, requested = algorithms.select_content_type(
                [content_type], _content_types.values())
        except errors.NoMatch:
            raise web.HTTPError(415, 'cannot decoded content type %s',
                                content_type_str,
                                reason='Unexpected content type')
        handler = _content_handlers[str(selected)]
        try:
            self._request_body = handler.unpack_bytes(
                self.request.body,
                encoding=content_type.parameters.get('charset'), )
        except ValueError as error:
            raise web.HTTPError(400, 'failed to decode content body - %r',
                                error, reason='Content body decode failure')
    return self._request_body
def fit(self, ini_betas=None, tol=1.0e-6, max_iter=200, solve='iwls'):
    """Method that fits a model with a particular estimation routine.

    Parameters
    ----------
    ini_betas : array
        k*1, initial coefficient values, including constant.
        Default is None, which calculates initial values during estimation.
    tol : float
        Tolerence for estimation convergence.
    max_iter : integer
        Maximum number of iterations if convergence not achieved.
    solve : string
        Technique to solve MLE equations.
        'iwls' = iteratively (re)weighted least squares (default)
    """
    # Record the fitting configuration for later inspection.
    self.fit_params['ini_betas'] = ini_betas
    self.fit_params['tol'] = tol
    self.fit_params['max_iter'] = max_iter
    self.fit_params['solve'] = solve
    if solve.lower() == 'iwls':
        params, predy, w, n_iter = iwls(self.y, self.X, self.family,
                                        self.offset, self.y_fix, ini_betas,
                                        tol, max_iter)
        self.fit_params['n_iter'] = n_iter
    # NOTE(review): if ``solve`` is anything other than 'iwls',
    # params/predy/w are never bound and the call below raises
    # NameError — confirm whether other solvers were intended.
    return GLMResults(self, params.flatten(), predy, w)
def make_colormap(seq, name="CustomMap", plot=False):
    """Generate a LinearSegmentedColormap.

    Parameters
    ----------
    seq : list of tuples
        A sequence of floats and RGB-tuples. The floats should be increasing
        and in the interval (0,1).
    name : string (optional)
        A name for the colormap
    plot : boolean (optional)
        Use to generate a plot of the colormap (Default is False).

    Returns
    -------
    matplotlib.colors.LinearSegmentedColormap

    `Source`__
    __ http://nbviewer.ipython.org/gist/anonymous/a4fa0adb08f9e9ea4f94
    """
    # Pad the sequence with sentinel endpoints so every float anchor has a
    # colour tuple on each side.
    padded = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
    cdict = {"red": [], "green": [], "blue": []}
    for position, item in enumerate(padded):
        if not isinstance(item, float):
            continue
        # Each float anchor takes the colour before it and after it.
        r_prev, g_prev, b_prev = padded[position - 1]
        r_next, g_next, b_next = padded[position + 1]
        cdict["red"].append([item, r_prev, r_next])
        cdict["green"].append([item, g_prev, g_next])
        cdict["blue"].append([item, b_prev, b_next])
    cmap = mplcolors.LinearSegmentedColormap(name, cdict)
    if plot:
        plot_colormap_components(cmap)
    return cmap
def start_reporter(redis_address, stdout_file=None, stderr_file=None, redis_password=None):
    """Start a reporter process.

    Args:
        redis_address (str): The address of the Redis instance.
        stdout_file: A file handle opened for writing to redirect stdout to. If
            no redirection should happen, then this should be None.
        stderr_file: A file handle opened for writing to redirect stderr to. If
            no redirection should happen, then this should be None.
        redis_password (str): The password of the redis server.

    Returns:
        ProcessInfo for the process that was started, or None when the
        required ``psutil`` dependency is missing.
    """
    # The reporter script lives next to this module.
    reporter_filepath = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "reporter.py")
    command = [sys.executable, "-u", reporter_filepath,
               "--redis-address={}".format(redis_address)]
    if redis_password:
        command += ["--redis-password", redis_password]
    # The reporter cannot run without psutil; bail out early (returning
    # None) rather than spawning a process that would crash.
    try:
        import psutil  # noqa: F401
    except ImportError:
        logger.warning("Failed to start the reporter. The reporter requires "
                       "'pip install psutil'.")
        return None
    process_info = start_ray_process(
        command, ray_constants.PROCESS_TYPE_REPORTER,
        stdout_file=stdout_file, stderr_file=stderr_file)
    return process_info
def save(self, copypath=None):
    """Save the content of the document back to the file.

    **Key Arguments:**
        - ``copypath`` -- path to a new file if you want to make a copy of
          the document instead of saving it to the original filepath.
          Default *None*.
    """
    # NOTE(review): ``refresh`` is accessed as a bare attribute, presumably
    # a property that reloads state -- confirm it is not meant to be called.
    self.refresh
    if copypath:
        self.filepath = copypath
    text = self.content
    import codecs
    # Write back as UTF-8, matching the original encoding choice.
    out = codecs.open(self.filepath, encoding='utf-8', mode='w')
    out.write(text)
    out.close()
    return None
def get_xy_name(self, yidx, xidx=0):
    """Return variable names for the given indices.

    :param yidx: int or list of ints selecting y variables
    :param xidx: int selecting the x variable (0 is the time axis)
    :return: tuple ``(xname, yname)`` where each is a
        [plain-name, formatted-name] pair
    """
    assert isinstance(xidx, int)
    indices = [yidx] if isinstance(yidx, int) else yidx
    # Index 0 is reserved for the time axis in both name tables.
    plain = ['Time [s]'] + self.uname
    fancy = ['$Time\\ [s]$'] + self.fname
    xname = [plain[xidx], fancy[xidx]]
    yname = [[plain[i] for i in indices], [fancy[i] for i in indices]]
    return xname, yname
def randint(self, low: int, high: int) -> int:
    """Return a random integer within the inclusive range low <= n <= high.

    Args:
        low (int): The lower bound of the random range.
        high (int): The upper bound of the random range.

    Returns:
        int: A random integer.
    """
    result = lib.TCOD_random_get_i(self.random_c, low, high)
    return int(result)
def site_name(self, site_name):
    """Set site_name and, when available, api_version and hash_string.

    Parameters:
        site_name (str): The site name in 'SITE_LIST', default sites.

    Raises:
        PybooruError: When 'site_name' isn't valid.
    """
    # Set base class property site_name (validates the name).
    _Pybooru.site_name.fset(self, site_name)
    # BUG FIX: the original condition was
    # ``('api_version' and 'hashed_string') in SITE_LIST[site_name]``.
    # ``'api_version' and 'hashed_string'`` evaluates to 'hashed_string',
    # so 'api_version' was never actually checked. Test both keys.
    site = SITE_LIST[site_name]
    if 'api_version' in site and 'hashed_string' in site:
        self.api_version = site['api_version']
        self.hash_string = site['hashed_string']
def expect(self, *args):
    '''Consume and return the next token if it has the correct type.

    Multiple token types (as strings, e.g. 'integer64') can be given as
    arguments. If the next token is one of them, consume and return it.
    If the token type doesn't match, raise a ConfigParseError (via
    ``self.error``).
    '''
    token = self.accept(*args)
    if token is None:
        self.error("expected: %r" % (args,))
    return token
def _parse(s, g):
    """Parse sentence ``s`` using CNF grammar ``g`` (weighted CYK).

    Returns ``(table, trees)`` where ``table[(i, j)]`` is the set of rules
    that can derive the span ``s[i..j]`` (inclusive), and ``trees[(i, j)]``
    maps each rule LHS to its lightest (minimum total weight) parse tree
    for that span.

    NOTE: uses ``xrange``, so this module targets Python 2.
    """
    # The CYK table. Indexed with a 2-tuple: (start pos, end pos)
    table = defaultdict(set)
    # Top-level structure is similar to the CYK table. Each cell is a dict from
    # rule name to the best (lightest) tree for that rule.
    trees = defaultdict(dict)
    # Populate base case with existing terminal production rules
    for i, w in enumerate(s):
        for terminal, rules in g.terminal_rules.items():
            if match(terminal, w):
                for rule in rules:
                    table[(i, i)].add(rule)
                    # Keep only the lightest tree per LHS for this 1-word span.
                    if (rule.lhs not in trees[(i, i)] or rule.weight < trees[(i, i)][rule.lhs].weight):
                        trees[(i, i)][rule.lhs] = RuleNode(rule, [T(w)], weight=rule.weight)
    # Iterate over lengths of sub-sentences
    for l in xrange(2, len(s) + 1):
        # Iterate over sub-sentences with the given length
        for i in xrange(len(s) - l + 1):
            # Choose partition of the sub-sentence in [1, l)
            for p in xrange(i + 1, i + l):
                span1 = (i, p - 1)
                span2 = (p, i + l - 1)
                # Try every pair of rules covering the two halves of the span.
                for r1, r2 in itertools.product(table[span1], table[span2]):
                    for rule in g.nonterminal_rules.get((r1.lhs, r2.lhs), []):
                        table[(i, i + l - 1)].add(rule)
                        r1_tree = trees[span1][r1.lhs]
                        r2_tree = trees[span2][r2.lhs]
                        rule_total_weight = rule.weight + r1_tree.weight + r2_tree.weight
                        # Record only if this combination beats the current best.
                        if (rule.lhs not in trees[(i, i + l - 1)] or rule_total_weight < trees[(i, i + l - 1)][rule.lhs].weight):
                            trees[(i, i + l - 1)][rule.lhs] = RuleNode(rule, [r1_tree, r2_tree], weight=rule_total_weight)
    return table, trees
def branch(self):
    """Get a flattened representation of the branch.

    @return: A flat list of nodes: self first, then all descendants
        in depth-first order.
    @rtype: [L{Element}, ..]
    """
    flat = [self]
    for child in self.children:
        flat.extend(child.branch())
    return flat
def download(ui, repo, clname, **opts):
    """download a change from the code review server

    Download prints a description of the given change list
    followed by its diff, downloaded from the code review server.
    """
    # Respect the global kill-switch for the codereview extension.
    if codereview_disabled:
        raise hg_util.Abort(codereview_disabled)
    change, _vers, diff, errmsg = DownloadCL(ui, repo, clname)
    if errmsg != "":
        return errmsg
    ui.write(change.EditorText() + "\n")
    ui.write(diff + "\n")
    return
def predict_epitopes_from_args(args):
    """Return an epitope collection for the given commandline arguments.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed commandline arguments for Topiary.
    """
    model = mhc_binding_predictor_from_args(args)
    variant_collection = variant_collection_from_args(args)
    gene_expr = rna_gene_expression_dict_from_args(args)
    transcript_expr = rna_transcript_expression_dict_from_args(args)
    topiary = TopiaryPredictor(
        mhc_model=model,
        padding_around_mutation=args.padding_around_mutation,
        ic50_cutoff=args.ic50_cutoff,
        percentile_cutoff=args.percentile_cutoff,
        min_transcript_expression=args.rna_min_transcript_expression,
        min_gene_expression=args.rna_min_gene_expression,
        only_novel_epitopes=args.only_novel_epitopes,
        raise_on_error=not args.skip_variant_errors)
    return topiary.predict_from_variants(
        variants=variant_collection,
        transcript_expression_dict=transcript_expr,
        gene_expression_dict=gene_expr)
def auth(username, password):
    """Middleware implementing authentication via LOGIN.

    Most of the time this middleware needs to be placed *after* TLS.

    :param username: Username to login with.
    :param password: Password of the user.
    """
    def login_middleware(conn):
        # Authenticate the connection using the captured credentials.
        conn.login(username, password)
    return login_middleware
def tree_prune_rank(self, tree, rank="species"):
    """Prune a TreeNode tree so that every tip sits at the specified rank.

    Tips below ``rank`` are removed, working back up the tree until all
    remaining tips are at the requested rank with a path back to the root.

    Parameters
    ----------
    tree : `skbio.tree.TreeNode`
        The root node of the tree to perform this operation on.
    rank : {'kingdom', 'phylum', 'class', 'order', 'family', 'genus', 'species'}, optional
        Analysis will be restricted to abundances of taxa at the specified
        level. ``None`` returns an unpruned copy.

    Returns
    -------
    `skbio.tree.TreeNode`
        Root of the pruned copy; the input tree is not modified.
    """
    pruned = tree.copy()
    if rank is None:
        return pruned
    # Postorder pass: a node survives if it is at the target rank, or if
    # any of its children survived (so internal ancestors are kept).
    for node in pruned.postorder():
        node._above_rank = (
            node.rank == rank
            or any(getattr(child, "_above_rank", False) for child in node.children)
        )
    pruned.remove_deleted(lambda n: not getattr(n, "_above_rank", False))
    return pruned
def GetPluginObjectByName(cls, plugin_name):
    """Retrieves a specific plugin object by its name.

    Args:
        plugin_name (str): name of the plugin.

    Returns:
        BasePlugin: a plugin object or None if not available.
    """
    registered = cls._plugin_classes.get(plugin_name, None)
    # Instantiate the class on demand; unknown names yield None.
    return registered() if registered else None
def histogram(a, bins=10, range=None, normed=False, weights=None, axis=None, strategy=None):
    """histogram(a, bins=10, range=None, normed=False, weights=None, axis=None)
    -> H, dict

    Return the distribution of sample.

    :Stochastics:
      `a` : Array sample.
      `bins` : Number of bins, or an array of bin edges, in which case the
        range is not used. If 'Scott' or 'Freeman' is passed, then
        the named method is used to find the optimal number of bins.
      `range` : Lower and upper bin edges, default: [min, max].
      `normed` : Boolean, if False, return the number of samples in each bin,
        if True, return the density.
      `weights` : Sample weights. The weights are normed only if normed is
        True. Should weights.sum() not equal len(a), the total bin count
        will not be equal to the number of samples.
      `axis` : Specifies the dimension along which the histogram is computed.
        Defaults to None, which aggregates the entire sample array.
      `strategy` : Histogramming method (binsize, searchsorted or digitize).

    :Return:
      `H` : The number of samples in each bin.
        If normed is True, H is a frequency distribution.
      dict{'edges': The bin edges, including the rightmost edge.
           'upper': Upper outliers.
           'lower': Lower outliers.
           'bincenters': Center of bins.
           'strategy': the histogramming method employed.}

    :Examples:
      >>> x = random.rand(100, 10)
      >>> H, D = histogram(x, bins=10, range=[0, 1], normed=True)
      >>> H2, D = histogram(x, bins=10, range=[0, 1], normed=True, axis=0)

    :SeeAlso: histogramnd
    """
    weighted = weights is not None
    a = asarray(a)
    if axis is None:
        # Aggregate the whole sample: flatten and histogram along axis 0.
        a = atleast_1d(a.ravel())
        if weighted:
            weights = atleast_1d(weights.ravel())
        axis = 0
    # Define the range
    if range is None:
        mn, mx = a.min(), a.max()
        if mn == mx:
            # Degenerate sample (all values equal): widen so one bin fits it.
            mn = mn - .5
            mx = mx + .5
        range = [mn, mx]
    # Find the optimal number of bins.
    if bins is None or isinstance(bins, str):
        bins = _optimize_binning(a, range, bins)
    # Compute the bin edges if they are not given explicitely.
    # For the rightmost bin, we want values equal to the right
    # edge to be counted in the last bin, and not as an outlier.
    # Hence, we shift the last bin by a tiny amount.
    if not iterable(bins):
        dr = diff(range) / bins * 1e-10
        edges = linspace(range[0], range[1] + dr, bins + 1, endpoint=True)
    else:
        edges = asarray(bins, float)
    dedges = diff(edges)
    bincenters = edges[:-1] + dedges / 2.
    # Number of bins
    nbin = len(edges) - 1
    # Measure of bin precision.
    decimal = int(-log10(dedges.min()) + 10)
    # Choose the fastest histogramming method: bins are "even" when all
    # widths agree up to `decimal` digits.
    even = (len(set(around(dedges, decimal))) == 1)
    if strategy is None:
        if even:
            strategy = 'binsize'
        else:
            if nbin > 30:
                # approximative threshold
                strategy = 'searchsort'
            else:
                strategy = 'digitize'
    else:
        if strategy not in ['binsize', 'digitize', 'searchsort']:
            raise ValueError('Unknown histogramming strategy.', strategy)
        if strategy == 'binsize' and not even:
            raise ValueError('This binsize strategy cannot be used for uneven bins.')
    # Stochastics for the fixed_binsize functions.
    start = float(edges[0])
    binwidth = float(dedges[0])
    # Looping to reduce memory usage
    block = 66600
    slices = [slice(None)] * a.ndim
    for i in arange(0, len(a), block):
        slices[axis] = slice(i, i + block)
        at = a[slices]
        if weighted:
            # Stack the weights after the data so _splitinmiddle can
            # separate them again inside apply_along_axis.
            at = concatenate((at, weights[slices]), axis)
            if strategy == 'binsize':
                count = apply_along_axis(_splitinmiddle, axis, at, flib.weighted_fixed_binsize, start, binwidth, nbin)
            elif strategy == 'searchsort':
                count = apply_along_axis(_splitinmiddle, axis, at, _histogram_searchsort_weighted, edges)
            elif strategy == 'digitize':
                count = apply_along_axis(_splitinmiddle, axis, at, _histogram_digitize, edges, normed)
        else:
            if strategy == 'binsize':
                count = apply_along_axis(flib.fixed_binsize, axis, at, start, binwidth, nbin)
            elif strategy == 'searchsort':
                count = apply_along_axis(_histogram_searchsort, axis, at, edges)
            elif strategy == 'digitize':
                count = apply_along_axis(_histogram_digitize, axis, at, None, edges, normed)
        # Accumulate block counts into the running total.
        if i == 0:
            total = count
        else:
            total += count
    # Outlier count
    upper = total.take(array([-1]), axis)
    lower = total.take(array([0]), axis)
    # Non-outlier count
    # NOTE(review): indexing with a list of slices is deprecated in recent
    # NumPy; `total[tuple(core)]` would be the modern spelling -- confirm
    # the targeted NumPy version before changing.
    core = a.ndim * [slice(None)]
    core[axis] = slice(1, -1)
    hist = total[core]
    if normed:
        normalize = lambda x: atleast_1d(x / (x * dedges).sum())
        hist = apply_along_axis(normalize, axis, hist)
    return hist, {'edges': edges, 'lower': lower, 'upper': upper, 'bincenters': bincenters, 'strategy': strategy}
def has_all_nonzero_segment_lengths(neuron, threshold=0.0):
    '''Check presence of neuron segments with length not above threshold.

    Arguments:
        neuron (Neuron): The neuron object to test
        threshold (float): value above which a segment length is considered
            to be non-zero

    Returns:
        CheckResult with result including list of (section_id, segment_id)
        of zero length segments
    '''
    offenders = []
    for section in _nf.iter_sections(neuron):
        pts = section.points
        # Walk consecutive point pairs (the segments of this section).
        for seg_id, seg in enumerate(zip(pts[:-1], pts[1:])):
            if segment_length(seg) <= threshold:
                offenders.append((section.id, seg_id))
    return CheckResult(len(offenders) == 0, offenders)
def get_ideas_in_review(self, **kwargs):
    """Fetch the ideas currently in review.

    :pagination_param: 'page_number', 'page_size', 'order_key'
    """
    endpoint = bind_api(
        api=self,
        path='/ideas/inreview',
        payload_type='idea',
        payload_list=True,
        pagination_param=['page_number', 'page_size', 'order_key'])
    return endpoint(**kwargs)
def css(self) -> str:
    """Generate a random snippet of CSS.

    :return: CSS.
    """
    # The order of random draws below matters for reproducibility with a
    # seeded generator, so it mirrors the original call sequence.
    css_sel = '{}{}'.format(self.random.choice(CSS_SELECTORS), self.__text.word())
    container = self.random.choice(list(HTML_CONTAINER_TAGS.keys()))
    markup = self.random.choice(HTML_MARKUP_TAGS)
    base = '{}'.format(self.random.choice([container, markup, css_sel]))
    props = '; '.join([self.css_property() for _ in range(self.random.randint(1, 6))])
    return '{} {{{}}}'.format(base, props)
def has_adjacent_fragments_only(self, min_index=None, max_index=None):
    """Return ``True`` if the list contains only adjacent fragments,
    that is, if it does not have gaps.

    :param int min_index: examine fragments with index greater than or equal
        to this index (i.e., included)
    :param int max_index: examine fragments with index smaller than this
        index (i.e., excluded)
    :raises ValueError: if ``min_index`` is negative or ``max_index``
        is bigger than the current number of fragments
    :rtype: bool
    """
    min_index, max_index = self._check_min_max_indices(min_index, max_index)
    for idx in range(min_index, max_index - 1):
        this_interval = self[idx].interval
        that_interval = self[idx + 1].interval
        if not this_interval.is_adjacent_before(that_interval):
            self.log(u"Found non adjacent fragments")
            self.log([u" Index %d => %s", idx, this_interval])
            self.log([u" Index %d => %s", idx + 1, that_interval])
            return False
    return True
def get_splits_query(self):
    """Return a query selecting all the splits in this account."""
    session = self.book.session
    return session.query(Split).filter(Split.account == self.account)
def remove_number_words(text_string):
    '''Remove any integer represented as a word within text_string and
    return the new string as type str.

    Keyword argument:
    - text_string: string instance

    Exceptions raised:
    - InputError: occurs should a non-string argument be passed
    '''
    # Empty/None inputs short-circuit to the empty string (no InputError).
    if text_string is None or text_string == "":
        return ""
    if not isinstance(text_string, str):
        raise InputError("string not passed as argument")
    cleaned = text_string
    for number_word in NUMBER_WORDS:
        # Strip the whole whitespace-delimited token containing the word.
        cleaned = re.sub(r'[\S]*\b' + number_word + r'[\S]*', "", cleaned)
    # Collapse the leftover whitespace.
    return " ".join(cleaned.split())
def modify_snapshot_attribute(self, snapshot_id, attribute='createVolumePermission', operation='add', user_ids=None, groups=None):
    """Changes an attribute of a snapshot.

    :type snapshot_id: string
    :param snapshot_id: The snapshot id you wish to change

    :type attribute: string
    :param attribute: The attribute you wish to change. Valid values are:
        createVolumePermission

    :type operation: string
    :param operation: Either add or remove (this is required for changing
        snapshot permissions)

    :type user_ids: list
    :param user_ids: The Amazon IDs of users to add/remove attributes

    :type groups: list
    :param groups: The groups to add/remove attributes. The only valid
        value at this time is 'all'.
    """
    request_params = {
        'SnapshotId': snapshot_id,
        'Attribute': attribute,
        'OperationType': operation,
    }
    if user_ids:
        self.build_list_params(request_params, user_ids, 'UserId')
    if groups:
        self.build_list_params(request_params, groups, 'UserGroup')
    return self.get_status('ModifySnapshotAttribute', request_params, verb='POST')
def perform_command(self):
    """Perform command and return the appropriate exit code.

    :rtype: int
    """
    if len(self.actual_arguments) < 1:
        return self.print_help()
    audio_file_path = self.actual_arguments[0]
    if not self.check_input_file(audio_file_path):
        return self.ERROR_EXIT_CODE
    try:
        prober = FFPROBEWrapper(rconf=self.rconf, logger=self.logger)
        properties = prober.read_properties(audio_file_path)
        # Print the properties sorted by key for stable output.
        for key in sorted(properties.keys()):
            self.print_generic(u"%s %s" % (key, properties[key]))
        return self.NO_ERROR_EXIT_CODE
    except FFPROBEPathError:
        self.print_error(u"Unable to call the ffprobe executable '%s'" % (self.rconf[RuntimeConfiguration.FFPROBE_PATH]))
        self.print_error(u"Make sure the path to ffprobe is correct")
    except (FFPROBEUnsupportedFormatError, FFPROBEParsingError):
        self.print_error(u"Cannot read properties of file '%s'" % (audio_file_path))
        self.print_error(u"Make sure the input file has a format supported by ffprobe")
    return self.ERROR_EXIT_CODE
def append(self, val):
    """Append the element *val* to the list.

    Raises a ValueError if *val* would violate the sort order.
    """
    _maxes, _lists, _keys = self._maxes, self._lists, self._keys
    key = self._key(val)
    if not _maxes:
        # Empty list: create the first sublist.
        _maxes.append(key)
        _keys.append([key])
        _lists.append([val])
        self._len = 1
        return
    last_pos = len(_keys) - 1
    # Appending is only legal when the new key is >= the current maximum.
    if key < _keys[last_pos][-1]:
        msg = '{0} not in sort order at index {1}'.format(repr(val), self._len)
        raise ValueError(msg)
    _maxes[last_pos] = key
    _keys[last_pos].append(key)
    _lists[last_pos].append(val)
    self._len += 1
    # Rebalance the last sublist if it grew too large.
    self._expand(last_pos)
def refresh(self, new_path=None, force_current=False):
    """Refresh widget.

    force_current=False: won't refresh widget if path has not changed.
    """
    if new_path is None:
        new_path = getcwd_or_home()
    if force_current:
        index = self.set_current_folder(new_path)
        self.expand(index)
        self.setCurrentIndex(index)
    has_history = self.histindex is not None
    self.set_previous_enabled.emit(has_history and self.histindex > 0)
    self.set_next_enabled.emit(has_history and self.histindex < len(self.history) - 1)
    # Disable the view of .spyproject.
    self.filter_directories()
def getUs(classname, eta_s, f, alpha_s, alpha_e, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau):
    """Get the QCD evolution matrix."""
    eigvals, eigvecs = admeig(classname, f, m_u, m_d, m_s, m_c, m_b, m_e, m_mu, m_tau)
    # Leading-order QCD beta function coefficient for f active flavours.
    b0s = 11 - 2 * f / 3
    exponents = eigvals / (2 * b0s)
    return eigvecs @ np.diag(eta_s ** exponents) @ np.linalg.inv(eigvecs)
def can_create_repository_with_record_types(self, repository_record_types):
    """Tests if this user can create a single ``Repository`` using the
    desired record types.

    While ``RepositoryManager.getRepositoryRecordTypes()`` can be used to
    examine which records are supported, this method tests which record(s)
    are required for creating a specific ``Repository``. Providing an empty
    array tests if a ``Repository`` can be created with no records.

    arg: repository_record_types (osid.type.Type[]): array of
        repository record types
    return: (boolean) - ``true`` if ``Repository`` creation using the
        specified ``Types`` is supported, ``false`` otherwise
    raise: NullArgument - ``repository_record_types`` is ``null``
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.BinAdminSession.can_create_bin_with_record_types
    # NOTE: It is expected that real authentication hints will be
    # handled in a service adapter above the pay grade of this impl.
    if self._catalog_session is None:
        return True
    return self._catalog_session.can_create_catalog_with_record_types(
        catalog_record_types=repository_record_types)
def credential_list_mappings(self):
    """Access the credential_list_mappings.

    :returns: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingList
    :rtype: twilio.rest.api.v2010.account.sip.domain.credential_list_mapping.CredentialListMappingList
    """
    # Lazily build and memoize the list resource on first access.
    if self._credential_list_mappings is None:
        self._credential_list_mappings = CredentialListMappingList(
            self._version,
            account_sid=self._solution['account_sid'],
            domain_sid=self._solution['sid'],
        )
    return self._credential_list_mappings
def get_tuid(self, branch, revision, file):
    """Return the TUIDs for a single file.

    :param branch: BRANCH TO FIND THE REVISION/FILE
    :param revision: THE REVISION NUMBER
    :param file: THE FULL PATH TO A SINGLE FILE
    :return: A LIST OF TUIDS
    """
    response = wrap(self.get_tuids(branch, revision, [file]))
    # Only one file was requested, so return the first (only) value.
    for _filename, tuids in response.items():
        return tuids
def get_relationship_lookup_session(self):
    """Gets the ``OsidSession`` associated with the relationship lookup service.

    return: (osid.relationship.RelationshipLookupSession) - a
        ``RelationshipLookupSession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_relationship_lookup()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_relationship_lookup()`` is ``true``.*
    """
    if not self.supports_relationship_lookup():
        raise Unimplemented()
    # The sessions module may be absent in partial installs.
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    try:
        return sessions.RelationshipLookupSession(proxy=self._proxy, runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
def _variants_arities(fields, arities, info_counts):
    """Determine arities (i.e., number of values to expect) for variants
    fields.

    Returns a tuple of arities aligned with ``fields``.
    """
    if arities is None:
        # No arities specified by user.
        arities = dict()
    for field, vcf_count in zip(fields, info_counts):
        if field == 'FILTER':
            # Force one value for the FILTER field.
            arities[field] = 1
        elif field not in arities:
            # Arity not specified by user; derive it.
            if field in config.STANDARD_VARIANT_FIELDS:
                arities[field] = config.DEFAULT_VARIANT_ARITY[field]
            elif vcf_count == NUMBER_ALLELE:
                # Default to 1 (biallelic).
                arities[field] = 1
            elif vcf_count <= 0:
                # Catch any other cases of non-specific arity.
                arities[field] = 1
            else:
                # Use arity (i.e., number) specified in INFO header.
                arities[field] = vcf_count
    # Convert to tuple for zipping with fields.
    return tuple(arities[field] for field in fields)
def disassembler(co, lasti=-1):
    """Disassemble a code object.

    :param co: code object
    :param lasti: internal; offset of the "last instruction" to flag
    :yields: Instructions.
    """
    code = co.co_code
    # Byte offsets that are jump targets.
    labels = dis.findlabels(code)
    # Map byte offset -> source line number for line starts.
    linestarts = dict(dis.findlinestarts(co))
    i = 0
    extended_arg = 0
    lineno = 0
    # Lazily-built table of cell + free variable names.
    free = None
    for i, op, oparg in _walk_ops(co):
        if i in linestarts:
            lineno = linestarts[i]
        instr = Instruction(i=i, op=op, lineno=lineno)
        instr.linestart = i in linestarts
        if i == lasti:
            instr.lasti = True
        else:
            instr.lasti = False
        if i in labels:
            instr.label = True
        else:
            instr.label = False
        instr.oparg = oparg
        # Reset, then record the EXTENDED_ARG contribution for this opcode.
        # NOTE(review): assumes the pre-3.6 2-byte-arg encoding where
        # EXTENDED_ARG scales by 65536 -- confirm the targeted interpreter.
        extended_arg = 0
        if op == dis.EXTENDED_ARG:
            extended_arg = oparg * 65536
        instr.extended_arg = extended_arg
        if op >= dis.HAVE_ARGUMENT:
            # Resolve the raw oparg into a human-meaningful value depending
            # on the opcode family.
            if op in dis.hasconst:
                instr.arg = co.co_consts[oparg]
            elif op in dis.hasname:
                instr.arg = co.co_names[oparg]
            elif op in dis.hasjrel:
                instr.arg = i + oparg
            elif op in dis.haslocal:
                instr.arg = co.co_varnames[oparg]
            elif op in dis.hascompare:
                instr.arg = dis.cmp_op[oparg]
            elif op in dis.hasfree:
                if free is None:
                    free = co.co_cellvars + co.co_freevars
                instr.arg = free[oparg]
        yield instr
def get_catalog():
    """Returns a catalog of available transforms.

    These are used to build chains for rendering with different back ends.
    """
    # Collect every module-level name ending in 'Transform'.
    catalog = {
        name: obj
        for name, obj in list(globals().items())
        if name.endswith('Transform')
    }
    return Bunch.Bunch(catalog, caseless=True)
def install_client_interceptors(client_interceptors=()):
    """Install client interceptors for the patchers.

    :param client_interceptors: a list of client interceptors to install.
        Should be a list of classes (given as importable symbol paths).
    """
    if not _valid_args(client_interceptors):
        raise ValueError('client_interceptors argument must be a list')
    from ..http_client import ClientInterceptors
    for symbol_path in client_interceptors:
        logging.info('Loading client interceptor %s', symbol_path)
        interceptor_class = _load_symbol(symbol_path)
        logging.info('Adding client interceptor %s', symbol_path)
        ClientInterceptors.append(interceptor_class())
def _eta_from_phi(self):
    """Update ``eta`` using current ``phi``."""
    self.eta = scipy.ndarray(N_NT - 1, dtype='float')
    # Running product of the eta values assigned so far.
    remaining = 1.0
    for w in range(N_NT - 1):
        self.eta[w] = 1.0 - self.phi[w] / remaining
        remaining *= self.eta[w]
    _checkParam('eta', self.eta, self.PARAMLIMITS, self.PARAMTYPES)
def luid(self):
    """Unique ID of the current stage (fully qualified).

    example:
        >>> stage.luid
        pipe.0001.stage.0004

    :getter: Returns the fully qualified uid of the current stage
    :type: String
    """
    # Prefer human-readable names, falling back to uids when unset.
    pipeline_part = self.parent_pipeline.get('name') or self.parent_pipeline['uid']
    stage_part = self.name or self.uid
    return '%s.%s' % (pipeline_part, stage_part)
def copy_to_clipboard(self, url):
    """Attempt to copy the selected URL to the user's clipboard."""
    if url is None:
        # Nothing selected: give visual feedback instead.
        self.term.flash()
        return
    try:
        clipboard_copy(url)
    except (ProgramError, OSError) as err:
        _logger.exception(err)
        self.term.show_notification('Failed to copy url: {0}'.format(err))
    else:
        self.term.show_notification(['Copied to clipboard:', url], timeout=1)
def nx_to_ontology(graph, source_node, output_path, base_iri):
    """Save an ontology mirroring a tree-shaped graph rooted at ``source_node``.

    Graph nodes are IDs, and have a 'label' in the node data with the right
    label.

    :param graph: the graph to convert (assumed to be a tree/DAG reachable
        from ``source_node``; cycles would recurse forever)
    :param source_node: the root node ID
    :param str output_path: where to save the ontology
    :param base_iri: base IRI for the new ontology
    """
    ontology = owlready.Ontology(base_iri)
    parent_lookup = {source_node: types.new_class(source_node, (owlready.Thing,), kwds={"ontology": ontology})}

    def recur(pnode):
        for neighbor in graph.neighbors(pnode):
            data = graph.node[neighbor]
            neighbor_class = types.new_class(neighbor, (parent_lookup[pnode],), kwds={"ontology": ontology})
            owlready.ANNOTATIONS[neighbor_class].add_annotation(owlready.rdfs.label, data['label'])
            # BUG FIX: register the newly created class so that the
            # recursive call can resolve it as a parent. Previously
            # ``parent_lookup`` only ever contained the source node, so any
            # node two or more levels below the root raised KeyError.
            parent_lookup[neighbor] = neighbor_class
            recur(neighbor)

    recur(source_node)
    ontology.save(filename=output_path)
def parse_kegg_entries(f, context=None):
    """Iterate over entries in KEGG file."""
    section_id = None
    entry_line = None
    properties = {}
    for lineno, line in enumerate(f):
        if line.strip() == '///':
            # End of entry: emit it and reset the accumulators.
            mark = FileMark(context, entry_line, 0)
            yield KEGGEntry(properties, filemark=mark)
            properties = {}
            section_id = None
            entry_line = None
            continue
        if entry_line is None:
            entry_line = lineno
        # A line starting with an upper-case identifier opens a new section;
        # otherwise the line continues the current section.
        m = re.match(r'([A-Z_]+)\s+(.*)', line.rstrip())
        if m is not None:
            section_id = m.group(1).lower()
            properties[section_id] = [m.group(2)]
        elif section_id is not None:
            properties[section_id].append(line.strip())
        else:
            raise ParseError('Missing section identifier at line {}'.format(lineno))
def make_plot(self):
    """Generate the plot from time series and arguments."""
    args = self.args
    fftlength = float(args.secpfft)
    overlap = args.overlap
    self.log(2, "Calculating spectrum secpfft: {0}, overlap: {1}".format(fftlength, overlap))
    # Convert the fractional overlap into seconds.
    overlap *= fftlength
    # Create plot.
    plot = Plot(figsize=self.figsize, dpi=self.dpi)
    ax = plot.gca()
    # Handle user specified plot labels.
    nlegargs = len(self.args.legend[0]) if self.args.legend else 0
    if nlegargs > 0 and nlegargs != self.n_datasets:
        warnings.warn('The number of legends specified must match '
                      'the number of time series'
                      ' (channels * start times). '
                      'There are {:d} series and {:d} legends'.format(len(self.timeseries), len(self.args.legend)))
        nlegargs = 0  # don't use them
    for idx in range(0, self.n_datasets):
        series = self.timeseries[idx]
        if nlegargs:
            label = self.args.legend[0][idx]
        else:
            label = series.channel.name
        if len(self.start_list) > 1:
            # Disambiguate identical channels from different start times.
            label += ', {0}'.format(series.epoch.gps)
        asd = series.asd(fftlength=fftlength, overlap=overlap)
        self.spectra.append(asd)
        if self.usetex:
            label = label_to_latex(label)
        ax.plot(asd, label=label)
    # Default x minimum to the lowest resolvable frequency on log axes.
    if args.xscale == 'log' and not args.xmin:
        args.xmin = 1 / fftlength
    return plot
def main():
    """This is the main module for the script. The script will accept a file,
    or a directory, and then encrypt it with a provided key before pushing it
    to S3 into a specified bucket.
    """
    parser = argparse.ArgumentParser(description=main.__doc__, add_help=True)
    parser.add_argument('-M', '--master_key', dest='master_key',
                        help='Path to the master key ' + 'used for the encryption. Data is transferred without encryption if this' + 'is not provided.',
                        type=str, required=False, default=None)
    parser.add_argument('-B', '--bucket', dest='bucket', help='S3 bucket.',
                        type=str, required=True)
    parser.add_argument('-R', '--remote_dir', dest='remote_dir',
                        help='Pseudo directory within ' + 'the bucket to store the file(s). NOTE: Folder structure below ' + 'REMOTE_DIR will be retained.',
                        type=str, required=False, default='')
    parser.add_argument('data', help='File(s) or folder(s) to transfer to S3.',
                        type=str, nargs='+')
    params = parser.parse_args()
    # Input handling.
    if params.master_key and not os.path.exists(params.master_key):
        raise InputParameterError('The master key was not found at ' + params.master_key)
    # Without ~/.boto there are no credentials, so it doesn't even make
    # sense to go ahead.
    if not os.path.exists(os.path.expanduser('~/.boto')):
        raise RuntimeError('~/.boto not found')
    # Ensure that the remote directory doesn't start with a /.
    if params.remote_dir.startswith('/'):
        raise InputParameterError('The remote dir cannot start with a \'/\'')
    # Process each of the input arguments.
    for datum in params.data:
        datum = os.path.abspath(datum)
        if os.path.exists(datum):
            write_to_s3(datum, params.master_key, params.bucket, params.remote_dir)
        else:
            print('ERROR: %s could not be found.' % datum, file=sys.stderr)
    return None
def lsst_doc_shortlink_titlecase_display_role(name, rawtext, text, lineno,
                                              inliner, options=None,
                                              content=None):
    """Link to an LSST document handle via the ls.st shortener, displaying
    the handle in title case.

    Useful for Document, Report, Minutes, and Collection DocuShare handles.

    Example::

        :document:`1`
    """
    options = options or {}
    content = content or []
    label = '{0}-{1}'.format(name.title(), text)
    target = 'https://ls.st/{0}-{1}'.format(name, text)
    link_node = nodes.reference(text=label, refuri=target, **options)
    return [link_node], []
def get_known_words(self, lang):
    """Return the set of all words the user has learned in *lang*."""
    skills = self.user_data.language_data[lang]['skills']
    # Only topics flagged as learned contribute their word lists.
    return {word for topic in skills if topic['learned'] for word in topic['words']}
def write_pre_script(self, fh):
    """Write the PRE script line for this job, if one is configured.

    @param fh: descriptor of an open DAG file.
    """
    if self.__pre_script:
        parts = ['SCRIPT PRE', str(self), self.__pre_script,
                 ' '.join(self.__pre_script_args)]
        fh.write(' '.join(parts) + '\n')
def validate_feature_api(project, force=False):
    """Validate the feature API for *project*.

    Raises SkippedValidationTest when not running on a PR (unless *force*),
    and InvalidFeatureApi when validation fails.
    """
    # Short-circuit keeps project.on_pr() uncalled when force is set,
    # matching the original evaluation order.
    if not (force or project.on_pr()):
        raise SkippedValidationTest('Not on PR')
    if not FeatureApiValidator(project).validate():
        raise InvalidFeatureApi
def post_process(self, paths, dry_run=False, **options):
    """Overridden to work around https://code.djangoproject.com/ticket/19111.

    Wraps the parent ``post_process`` so that per-file hashing/url failures
    are counted instead of aborting the whole run, then prints a summary of
    how many errors occurred.
    """
    # post_process_error_counter manages self._post_process_error_count;
    # patched_name_fn wraps hashed_name()/url() so their failures are
    # recorded rather than propagated.
    with post_process_error_counter(self):
        with patched_name_fn(self, 'hashed_name', 'hashed name'):
            with patched_name_fn(self, 'url', 'url'):
                # Re-yield the parent generator's results unchanged.
                for result in super(LaxPostProcessorMixin, self).post_process(paths, dry_run, **options):
                    yield result
    error_count = self._post_process_error_count
    if error_count:
        # Singular/plural suffix: "1 post-processing error." vs "N ... errors."
        print('%s post-processing error%s.' % (error_count, '' if error_count == 1 else 's'))
def normalize_address(self, addr, is_write=False, convert_to_valueset=False, target_region=None, condition=None):  # pylint:disable=arguments-differ
    """Convert a ValueSet object into a list of addresses.

    :param addr: A ValueSet object (which describes an address)
    :param is_write: Is this address used in a write or not
    :param convert_to_valueset: True if you want to have a list of ValueSet
        instances instead of AddressWrappers, False otherwise
    :param target_region: Which region to normalize the address to. To leave
        the decision to SimuVEX, set it to None
    :param condition: Optional extra condition applied to a symbolic address
        before concretization.
    :return: A list of AddressWrapper or ValueSet objects
    """
    # Writes are concretized to a smaller number of targets than reads.
    targets_limit = WRITE_TARGETS_LIMIT if is_write else READ_TARGETS_LIMIT
    if type(addr) is not int:
        # Narrow the symbolic address using any path constraint that shares
        # variables with it.
        for constraint in self.state.solver.constraints:
            if getattr(addr, 'variables', set()) & constraint.variables:
                addr = self._apply_condition_to_symbolic_addr(addr, constraint)
    # Apply the caller-supplied condition if necessary
    if condition is not None:
        addr = self._apply_condition_to_symbolic_addr(addr, condition)
    # Lift concrete ints into bitvectors of the architecture's width.
    if type(addr) is int:
        addr = self.state.solver.BVV(addr, self.state.arch.bits)
    addr_with_regions = self._normalize_address_type(addr)
    address_wrappers = []
    for region, addr_si in addr_with_regions:
        concrete_addrs = addr_si.eval(targets_limit)
        if len(concrete_addrs) == targets_limit and HYBRID_SOLVER in self.state.options:
            # The strided-interval evaluation hit the cap; fall back to the
            # (slower, more precise) solver to check whether there really are
            # that many targets.
            exact = True if APPROXIMATE_FIRST not in self.state.options else None
            solutions = self.state.solver.eval_upto(addr, targets_limit, exact=exact)
            if len(solutions) < len(concrete_addrs):
                concrete_addrs = [addr_si.intersection(s).eval(1)[0] for s in solutions]
        if len(concrete_addrs) == targets_limit:
            # Still at the cap: record that concretization was truncated.
            self.state.history.add_event('mem', message='concretized too many targets. address = %s' % addr_si)
        for c in concrete_addrs:
            aw = self._normalize_address(region, c, target_region=target_region)
            address_wrappers.append(aw)
    if convert_to_valueset:
        return [i.to_valueset(self.state) for i in address_wrappers]
    else:
        return address_wrappers
def cleanup(self):
    """Stop the background processing thread and clean up its resources."""
    # Signal the worker loop to exit, then nudge it awake so it notices.
    self._processing_stop = True
    self._wakeup_processing_thread()
    # Wait (bounded, 3s) for the thread to acknowledge the shutdown.
    self._processing_stopped_event.wait(3)
def extract_props(self, settings):
    """Extract all valuable properties to be displayed.

    Returns a dict mapping each call parameter to its value from *settings*,
    or None when the parameter is absent.
    """
    return {key: (settings[key] if key in settings else None)
            for key in self.call_parameters}
def ctypes2numpy(cptr, length, dtype):
    """Convert a ctypes float pointer array to a numpy array.

    :param cptr: ctypes POINTER(c_float) to the source data
    :param length: number of elements to copy
    :param dtype: dtype of the returned array

    NOTE(review): the copy size is length * itemsize of *dtype*, while the
    source is a c_float pointer -- assumes callers pass 4-byte dtypes; confirm.
    """
    if not isinstance(cptr, ctypes.POINTER(ctypes.c_float)):
        raise RuntimeError('expected float pointer')
    out = np.zeros(length, dtype=dtype)
    n_bytes = length * out.strides[0]
    if not ctypes.memmove(out.ctypes.data, cptr, n_bytes):
        raise RuntimeError('memmove failed')
    return out
def anomalous_score(self):
    """Anomalous score of this reviewer.

    The initial anomalous score is :math:`1/|R|` where :math:`R` is the set
    of reviewers; that default is returned until a score has been computed.

    Bug fix: the previous truthiness check (``self._anomalous if
    self._anomalous else ...``) also discarded a legitimately computed score
    of 0 and fell back to the default; an explicit ``is not None`` test
    preserves zero scores.
    """
    if self._anomalous is not None:
        return self._anomalous
    return 1. / len(self._graph.reviewers)
def value_after(self, x):
    """Return the argument following *x* in ``self.all``, or None.

    None is returned both when *x* is absent and when it is the last entry.
    """
    try:
        pos = self.all.index(x)
    except ValueError:
        # x not present at all.
        return None
    try:
        return self.all[pos + 1]
    except IndexError:
        # x was the last argument.
        return None
def remote_port_id_mac_uneq_store(self, remote_port_id_mac):
    """Store the remote port MAC when it differs from the cached value.

    Returns True when the value changed (and was stored), False otherwise.
    """
    if remote_port_id_mac == self.remote_port_id_mac:
        return False
    self.remote_port_id_mac = remote_port_id_mac
    return True
def add_query(self, name, filter, **kwargs):
    """Add a new query to the device query service.

    .. code-block:: python

        f = api.add_query(
            name="Query name",
            filter={
                "device_id": {"$eq": "01234"},
                "custom_attributes": {"foo": {"$eq": "bar"}},
            },
        )
        print(f.created_at)

    :param str name: Name of query (Required)
    :param dict filter: Filter properties to apply (Required)
    :return: the newly created query object
    :rtype: Query
    """
    # Ensure we have the correct types and get the new query object.
    # NOTE(review): when *filter* is falsy, filter_obj is None and the
    # filter_obj['filter'] subscript below raises TypeError; since the
    # parameter is documented as required, this guard is effectively dead --
    # confirm with callers.
    filter_obj = filters.legacy_filter_formatter(dict(filter=filter), Device._get_attributes_map()) if filter else None
    query_map = Query._create_request_map(kwargs)
    # Create the DeviceQuery object
    f = DeviceQuery(name=name, query=filter_obj['filter'], **query_map)
    api = self._get_api(device_directory.DefaultApi)
    return Query(api.device_query_create(f))
def updateMigrationBlockStatus(self, migration_status=0, migration_block=None, migration_request=None):
    """Update the migration status of a single block or a whole request.

    migration_status:
        0 = PENDING
        1 = IN PROGRESS
        2 = COMPLETED
        3 = FAILED (will be retried)
        9 = Terminally FAILED
    Allowed status transitions while working through a migration:
        0 -> 1, 1 -> 2, 1 -> 3, 1 -> 9
    3 -> 1 is allowed for retrying.
    """
    conn = self.dbi.connection()
    tran = conn.begin()
    try:
        # Target either a single block or the whole request, depending on
        # which id was supplied (block takes precedence).
        if migration_block:
            upst = dict(migration_status=migration_status, migration_block_id=migration_block, last_modification_date=dbsUtils().getTime())
        elif migration_request:
            upst = dict(migration_status=migration_status, migration_request_id=migration_request, last_modification_date=dbsUtils().getTime())
        # NOTE(review): if neither migration_block nor migration_request is
        # given, 'upst' is unbound and the execute below raises NameError
        # (which is rolled back and re-raised) -- confirm callers always
        # pass exactly one of them.
        self.mgrup.execute(conn, upst)
    except:
        # Bare except is deliberate: roll back on *any* failure, then re-raise.
        if tran:
            tran.rollback()
        raise
    else:
        if tran:
            tran.commit()
    finally:
        if conn:
            conn.close()
def read_utf8(fh, byteorder, dtype, count, offsetsize):
    """Read *count* bytes of tag data from *fh* and decode as UTF-8.

    byteorder, dtype and offsetsize are unused; they are part of the common
    tag-reader signature.
    """
    raw = fh.read(count)
    return raw.decode('utf-8')
def get_columns(self, *, top=None, skip=None):
    """Return the columns of this table.

    :param int top: specify n columns to retrieve
    :param int skip: specify n columns to skip
    """
    url = self.build_url(self._endpoints.get('get_columns'))
    # OData paging parameters; omit the query string entirely when unused.
    query = {}
    if top is not None:
        query['$top'] = top
    if skip is not None:
        query['$skip'] = skip
    response = self.session.get(url, params=query or None)
    if not response:
        return iter(())
    payload = response.json()
    return (self.column_constructor(parent=self, **{self._cloud_data_key: raw})
            for raw in payload.get('value', []))
def stl(A, b):
    r"""Shortcut for ``solve_triangular(A, b, lower=True, check_finite=False)``.

    Solve the linear system :math:`\mathrm A \mathbf x = \mathbf b` when
    :math:`\mathrm A` is a lower-triangular matrix.

    Args:
        A (array_like): A lower-triangular matrix.
        b (array_like): Ordinate values.

    Returns:
        :class:`numpy.ndarray`: Solution ``x``.

    See Also:
        scipy.linalg.solve_triangular: Solve triangular linear equations.
    """
    from scipy.linalg import solve_triangular
    lower_tri = asarray(A, float)
    rhs = asarray(b, float)
    return solve_triangular(lower_tri, rhs, lower=True, check_finite=False)
def method(func):
    """Mark *func* as an abstract interface method and tag it as such."""
    wrapped = abc.abstractmethod(func)
    # __imethod__ lets the framework distinguish interface methods later.
    wrapped.__imethod__ = True
    return wrapped
def _ParseRegisteredDLLs(self, parser_mediator, registry_key):
    """Parses the registered DLLs that receive event notifications.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      registry_key (dfwinreg.WinRegistryKey): Windows Registry key.
    """
    notify_key = registry_key.GetSubkeyByName('Notify')
    if not notify_key:
        return
    # Each Notify subkey registers one notification package; each trigger
    # value in self._TRIGGERS names an event (logon, logoff, ...) that
    # invokes a handler function in the package's DLL.
    for subkey in notify_key.GetSubkeys():
        for trigger in self._TRIGGERS:
            handler_value = subkey.GetValueByName(trigger)
            if not handler_value:
                continue
            values_dict = {'Application': subkey.name, 'Handler': handler_value.GetDataAsObject(), 'Trigger': trigger}
            # DllName identifies the DLL that hosts the handler.
            command_value = subkey.GetValueByName('DllName')
            if command_value:
                values_dict['Command'] = command_value.GetDataAsObject()
            event_data = windows_events.WindowsRegistryEventData()
            event_data.key_path = subkey.path
            event_data.offset = subkey.offset
            event_data.regvalue = values_dict
            event_data.source_append = ': Winlogon'
            # One event per (subkey, trigger) pair, timestamped with the
            # subkey's last-written time.
            event = time_events.DateTimeValuesEvent(subkey.last_written_time, definitions.TIME_DESCRIPTION_WRITTEN)
            parser_mediator.ProduceEventWithEventData(event, event_data)
def plot_dist_normal(s, mu, sigma):
    """Plot a histogram of samples *s* against the N(mu, sigma) density curve.

    :param s: sample values
    :param mu: mean of the normal distribution
    :param sigma: standard deviation of the normal distribution
    """
    import matplotlib.pyplot as plt
    # Fix: 'normed' was deprecated and later removed from matplotlib's hist;
    # 'density=True' is the supported equivalent.
    count, bins, ignored = plt.hist(s, 30, density=True)
    # Overlay the analytic normal probability density function.
    plt.plot(bins,
             1 / (sigma * np.sqrt(2 * np.pi)) * np.exp(-(bins - mu) ** 2 / (2 * sigma ** 2)),
             linewidth=2, color='r')
    plt.show()
def get_sequence_value(node):
    """Convert an element with DataType Sequence to a DataFrame.

    Note this may be a naive implementation as I assume that bulk data is
    always a table.
    """
    # NOTE(review): 15 appears to be the SEQUENCE datatype code in the
    # Bloomberg element type enumeration -- confirm against the API docs.
    assert node.Datatype == 15
    data = defaultdict(list)
    cols = []
    for i in range(node.NumValues):
        row = node.GetValue(i)
        if i == 0:
            # Get the ordered cols from the first row and assume they are
            # constant across all rows.
            cols = [str(row.GetElement(_).Name) for _ in range(row.NumElements)]
        for cidx in range(row.NumElements):
            col = row.GetElement(cidx)
            data[str(col.Name)].append(XmlHelper.as_value(col))
    # 'columns=cols' preserves the original column order lost by the dict.
    return DataFrame(data, columns=cols)
def is_secondary_ref(self, en: str) -> bool:
    """Determine whether *en* names something in the neighborhood of the
    requested classes.

    @param en: element name
    @return: True if *en* is the name of a slot, class or type in the
        immediate neighborhood of what we are building
    """
    # With no class restriction, everything counts as in-neighborhood.
    if not self.gen_classes:
        return True
    if en in self.schema.classes:
        return en in self.gen_classes_neighborhood.classrefs
    if en in self.schema.slots:
        return en in self.gen_classes_neighborhood.slotrefs
    if en in self.schema.types:
        return en in self.gen_classes_neighborhood.typerefs
    # Unknown element kinds are not filtered.
    return True
def remove_reaction(self, reaction):
    """Remove *reaction* from the model.

    Drops its flux limits and any compound no longer referenced by the
    remaining reactions in the model.
    """
    if reaction not in self._reaction_set:
        return
    self._reaction_set.remove(reaction)
    self._limits_lower.pop(reaction, None)
    self._limits_upper.pop(reaction, None)
    # Remove each compound of the removed reaction that no other reaction
    # in the model still references.
    for compound, _ in self._database.get_reaction_values(reaction):
        referencing = frozenset(self._database.get_compound_reactions(compound))
        if self._reaction_set.isdisjoint(referencing):
            self._compound_set.remove(compound)
def parse_reports(self, reports):
    """Parse checkstyle-style XML reports into per-file violations.

    Args:
        reports: list[str] - output from the report
    Return:
        A dict[str: Violation]
        Violation is a simple named tuple defined above.
    """
    # NOTE(review): cElementTree was removed in Python 3.9; the file-level
    # import presumably maps to xml.etree.ElementTree -- confirm.
    violations_dict = defaultdict(list)
    for report in reports:
        xml_document = cElementTree.fromstring("".join(report))
        files = xml_document.findall(".//file")
        for file_tree in files:
            for error in file_tree.findall('error'):
                line_number = error.get('line')
                error_str = "{}: {}".format(error.get('severity'), error.get('message'))
                violation = Violation(int(line_number), error_str)
                # Normalize the reported path relative to the git root so it
                # matches the diff's file names.
                filename = GitPathTool.relative_path(file_tree.get('name'))
                violations_dict[filename].append(violation)
    return violations_dict
def deletePartials(self, dryrun=False):
    """Delete any old partial uploads/downloads in path.

    :param dryrun: when True, only log what would be deleted.
    """
    for (vol, path) in self.extraVolumes.items():
        # Only ".part" entries are leftovers from interrupted transfers.
        if not path.endswith(".part"):
            continue
        # _skipDryRun returns a logging callable: it reports the action and
        # returns True when the action should be skipped (dry run).
        if self._skipDryRun(logger, 'INFO', dryrun=dryrun)("Delete subvolume %s", path):
            continue
        self.butterVolumes[vol.uuid].destroy()
def get_table():
    """Provides table of scheduling block instance metadata for use with AJAX
    tables.

    Returns a (payload, HTTPStatus.OK) tuple; the payload maps 'blocks' to a
    list of [row_index, block_id, sub_array_id, num_processing_blocks] rows.
    """
    response = dict(blocks=[])
    block_ids = DB.get_sched_block_instance_ids()
    for index, block_id in enumerate(block_ids):
        # get_block_details returns a generator of per-id results; take the
        # single entry for this block id.
        block = DB.get_block_details([block_id]).__next__()
        info = [index, block['id'], block['sub_array_id'], len(block['processing_blocks'])]
        response['blocks'].append(info)
    return response, HTTPStatus.OK
def rehighlight(self):
    """Rehighlight the entire document, may be slow.

    Shows a wait cursor for the duration and logs how long the rehighlight
    took. Fix: the cursor is now restored in a ``finally`` block so it
    cannot leak when the parent implementation raises anything other than
    RuntimeError.
    """
    start = time.time()
    QtWidgets.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
    try:
        super(SyntaxHighlighter, self).rehighlight()
    except RuntimeError:
        # Cloned widget: no need to rehighlight the same document twice ;)
        pass
    finally:
        # Always restore the cursor, even on unexpected exceptions.
        QtWidgets.QApplication.restoreOverrideCursor()
    end = time.time()
    _logger().debug('rehighlight duration: %fs' % (end - start))
def destroy(self, bot_id):
    """Destroy a bot.

    :param str bot_id: the ID of the bot to destroy
    :return: ``True`` if successful
    :rtype: bool
    """
    endpoint = utils.urljoin(self.url, 'destroy')
    response = self.session.post(endpoint, json={'bot_id': bot_id})
    return response.ok
def auth_plugins(auth_plugins=None):
    """Authentication plugins.

    Usage: add any plugin here that will serve as a rapid means to
    authenticate to an OpenStack environment.

    Syntax is as follows::

        >>> __auth_plugins__ = {
        ...     'new_plugin_name': {
        ...         'os_auth_url': 'https://localhost:5000/v2.0/tokens',
        ...         'os_prefix': {
        ...             'os_apikey': 'apiKeyCredentials',
        ...             'os_password': 'passwordCredentials'
        ...         },
        ...         'args': {
        ...             'commands': ['--new-plugin-name-auth'],
        ...             'choices': ['RegionOne'],
        ...             'help': 'Authentication plugin for New Plugin Name',
        ...             'default': os.environ.get('OS_NEW_PLUGIN_AUTH', None),
        ...             'metavar': '[REGION]'
        ...         }
        ...     }
        ... }

    If the subdomain is in the auth url, as is the case with hp, add
    "%(region)s" to the "os_auth_url" value. The region value from the list
    of choices will be used as the string replacement. Note that if the
    `os_prefix` key is added the system will override the authentication
    body prefix with the string provided. At this time the choices are
    os_apikey, os_password, os_token. All key entries are optional and
    should one not be specified with a credential type a
    `NotImplementedError` will be raised.

    :param auth_plugins: Additional plugins to add in
    :type auth_plugins: ``dict``
    :returns: ``dict``
    """
    # Built-in plugin registry. Adjacent string literals below are kept as
    # in the original source (implicit concatenation).
    __auth_plugins__ = {
        'os_rax_auth': {
            'os_auth_url': 'https://identity.api.rackspacecloud.com/v2.0/' 'tokens',
            'os_prefix': {
                'os_apikey': 'RAX-KSKEY:apiKeyCredentials',
                'os_password': 'passwordCredentials'
            },
            'args': {
                'commands': ['--os-rax-auth'],
                'choices': ['dfw', 'ord', 'iad', 'syd', 'hkg', 'lon'],
                'help': 'Authentication Plugin for Rackspace Cloud' ' env[OS_RAX_AUTH]',
                'default': os.environ.get('OS_RAX_AUTH', None),
                'metavar': '[REGION]'
            }
        },
        'rax_auth_v1': {
            'os_auth_version': 'v1.0',
            'os_auth_url': 'https://identity.api.rackspacecloud.com/v1.0',
            'args': {
                'commands': ['--rax-auth-v1'],
                'action': 'store_true',
                'help': 'Authentication Plugin for Rackspace Cloud V1'
            }
        },
        'os_rax_auth_lon': {
            'os_auth_url': 'https://lon.identity.api.rackspacecloud.com/' 'v2.0/tokens',
            'os_prefix': {
                'os_apikey': 'RAX-KSKEY:apiKeyCredentials',
                'os_password': 'passwordCredentials'
            },
            'args': {
                'commands': ['--os-rax-auth-lon'],
                'choices': ['lon'],
                'help': 'Authentication Plugin for Rackspace Cloud' ' env[OS_RAX_AUTH_LON]',
                'default': os.environ.get('OS_RAX_AUTH_LON', None),
                'metavar': '[REGION]'
            }
        },
        'os_hp_auth': {
            # %(region)s is substituted with the chosen region at auth time.
            'os_auth_url': 'https://%(region)s.identity.hpcloudsvc.com:35357/' 'v2.0/tokens',
            'os_prefix': {
                'os_password': 'passwordCredentials'
            },
            'args': {
                'commands': ['--os-hp-auth'],
                'choices': ['region-b.geo-1', 'region-a.geo-1'],
                'help': 'Authentication Plugin for HP Cloud' ' env[OS_HP_AUTH]',
                'default': os.environ.get('OS_HP_AUTH', None),
                'metavar': '[REGION]'
            }
        }
    }
    if auth_plugins:
        # Caller-supplied plugins extend/override the built-ins.
        __auth_plugins__.update(auth_plugins)
    return __auth_plugins__
def get_product_access_tokens(self, app_ids=None, package_ids=None):
    """Get access tokens for apps and/or packages.

    :param app_ids: list of app ids
    :type app_ids: :class:`list`
    :param package_ids: list of package ids
    :type package_ids: :class:`list`
    :return: dict with ``apps`` and ``packages`` mapping ids to their access
        tokens (see example below); ``None`` when nothing was requested or
        no response arrived.
    :rtype: :class:`dict`, :class:`None`

    .. code:: python

        {'apps':     {123: 'token', ...},
         'packages': {456: 'token', ...}}
    """
    # Fix: mutable default arguments ([]) replaced with None sentinels;
    # behavior for callers is unchanged.
    app_ids = [] if app_ids is None else list(app_ids)
    package_ids = [] if package_ids is None else list(package_ids)
    if not app_ids and not package_ids:
        return
    resp = self.send_job_and_wait(
        MsgProto(EMsg.ClientPICSAccessTokenRequest),
        {
            'appids': [int(app_id) for app_id in app_ids],
            'packageids': [int(pkg_id) for pkg_id in package_ids],
        },
        timeout=15,
    )
    if resp:
        return {
            'apps': {app.appid: app.access_token for app in resp.app_access_tokens},
            'packages': {pkg.appid: pkg.access_token for pkg in resp.package_access_tokens},
        }
def convolveSpectrum(Omega, CrossSection, Resolution=0.1, AF_wing=10., SlitFunction=SLIT_RECTANGULAR, Wavenumber=None):
    """Produce a simulation of an experimental spectrum via convolution of a
    "dry" spectrum with an instrumental function.

    INPUT PARAMETERS:
        Wavenumber/Omega: wavenumber grid (required)
        CrossSection: high-res cross section calculated on grid (required)
        Resolution: instrumental resolution gamma (optional)
        AF_wing: instrumental function wing (optional)
        SlitFunction: instrumental function for low-res spectra calculation (optional)
    OUTPUT PARAMETERS:
        Wavenum: wavenumber grid
        CrossSection: low-res cross section calculated on grid
        i1: lower index in Omega input
        i2: higher index in Omega input
        slit: slit function calculated over grid [-AF_wing; AF_wing]
              with the step equal to instrumental resolution.
    EXAMPLE OF USAGE:
        nu_, radi_, i, j, slit = convolveSpectrum(nu, radi, Resolution=2.0,
            AF_wing=10.0, SlitFunction=SLIT_MICHELSON)
    """
    # Compatibility with older versions: 'Wavenumber' is the legacy name for
    # 'Omega'. Fix: compare against None explicitly -- truth-testing a numpy
    # array raises "truth value is ambiguous".
    if Wavenumber is not None:
        Omega = Wavenumber
    step = Omega[1] - Omega[0]
    if step >= Resolution:
        raise Exception('step must be less than resolution')
    # Instrumental-function grid spanning [-AF_wing, AF_wing] at grid step.
    x = arange_(-AF_wing, AF_wing + step, step)
    slit = SlitFunction(x, Resolution)
    # Normalize the slit function so the convolution conserves area.
    slit /= sum(slit) * step
    # Fix: integer division -- these are slice indices; plain '/' yields a
    # float under Python 3 and breaks the slicing below.
    left_bnd = len(slit) // 2
    right_bnd = len(Omega) - len(slit) // 2
    # 'same' mode keeps the output aligned with the input grid; the edges
    # (half a slit width on each side) are trimmed off afterwards.
    CrossSectionLowRes = convolve(CrossSection, slit, mode='same') * step
    return Omega[left_bnd:right_bnd], CrossSectionLowRes[left_bnd:right_bnd], left_bnd, right_bnd, slit
def strftime(date_time=None, time_format=None):
    """Format a datetime object as a string.

    :param date_time: datetime object; defaults to ``datetime.now()``.
    :param time_format: strftime format string; defaults to
        ``'%Y/%m/%d %H:%M:%S'``.
    :return: the formatted date/time string.
    """
    moment = date_time if date_time else datetime.now()
    fmt = time_format if time_format else '%Y/%m/%d %H:%M:%S'
    return datetime.strftime(moment, fmt)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.