signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def annotation_id(self, value):
    """Set the ID for this Annotation, rejecting empty or blank values.

    :param value: the new ID; must not be None, '', or whitespace-only
    :raises AttributeError: when the supplied value is empty or blank
    """
    is_blank = value in (None, '') or not str(value).strip()
    if is_blank:
        raise AttributeError("Invalid ID value supplied")
    self._id = value
|
def tail(f, window=20):
    """Return the last `window` lines of binary file `f` as a list of str.

    @param window: the number of lines to return.
    """
    if window == 0:
        return []
    CHUNK = 1024
    f.seek(0, 2)
    bytes_left = f.tell()
    lines_to_find = window + 1
    block_index = -1
    blocks = []
    while lines_to_find > 0 and bytes_left > 0:
        if bytes_left - CHUNK > 0:
            # Seek back one whole chunk from the end and read it.
            f.seek(block_index * CHUNK, 2)
            blocks.append(f.read(CHUNK).decode('utf-8', errors='ignore'))
        else:
            # File smaller than a chunk: start from the beginning and
            # read only what has not been read yet.
            f.seek(0, 0)
            blocks.append(f.read(bytes_left).decode('utf-8', errors='ignore'))
        lines_to_find -= blocks[-1].count('\n')
        bytes_left -= CHUNK
        block_index -= 1
    # Blocks were collected back-to-front, so reverse before joining.
    return ''.join(reversed(blocks)).splitlines()[-window:]
|
async def answer_pre_checkout_query(self, pre_checkout_query_id: base.String, ok: base.Boolean, error_message: typing.Union[base.String, None] = None) -> base.Boolean:
    """Respond to a pre-checkout query.

    Once the user has confirmed their payment and shipping details, the
    Bot API sends the final confirmation as an Update with the field
    pre_checkout_query; use this method to answer it.

    Source: https://core.telegram.org/bots/api#answerprecheckoutquery

    :param pre_checkout_query_id: Unique identifier for the query to be answered
    :type pre_checkout_query_id: :obj:`base.String`
    :param ok: Specify True if everything is alright (goods are available,
        etc.) and the bot is ready to proceed with the order; use False if
        there are any problems.
    :type ok: :obj:`base.Boolean`
    :param error_message: Required if ok is False. Human-readable error
        message explaining why the checkout cannot proceed; Telegram will
        display it to the user.
    :type error_message: :obj:`typing.Union[base.String, None]`
    :return: On success, True is returned
    :rtype: :obj:`base.Boolean`
    """
    # NOTE: locals() is harvested here, so the parameter names above are
    # part of the wire payload and must not be renamed.
    payload = generate_payload(**locals())
    return await self.request(api.Methods.ANSWER_PRE_CHECKOUT_QUERY, payload)
|
def is_uniform(self):
    """Return True if the file contains a uniform series of pages.

    Uniformity means the hashes of IFDs 0, 1, 7 and -1 all match; files
    produced by ScanImage or NIH are treated as uniform unconditionally.
    """
    pages = self.pages
    first = pages[0]
    if first.is_scanimage or first.is_nih:
        return True
    saved_useframes = pages.useframes
    try:
        pages.useframes = False
        reference = first.hash
        # Sampling a few spread-out pages is enough to detect mixed series.
        return all(pages[i].aspage().hash == reference for i in (1, 7, -1))
    except IndexError:
        # Fewer than 8 pages: cannot confirm uniformity.
        return False
    finally:
        pages.useframes = saved_useframes
|
def get_available_parameters(self):
    """Return a list of the parameters made available by the script.

    Runs the script with '--PrintHelp' and scrapes the option names from
    its output.

    :return: list of parameter names (without the leading '--'), or an
        empty list when the help section cannot be located
    """
    # At the moment, we rely on regex to extract the list of available
    # parameters. A tighter integration with waf would allow for a more
    # natural extraction of the information.
    stdout = self.run_program("%s %s" % (self.script_executable, '--PrintHelp'), environment=self.environment, native_spec=BUILD_GRID_PARAMS)
    # Raw strings: '\s' in a plain literal is an invalid escape sequence
    # (DeprecationWarning since Python 3.6, error in future versions).
    options = re.findall(r'.*Program\s(?:Arguments|Options):(.*)General\sArguments.*', stdout, re.DOTALL)
    if options:
        return re.findall(r'.*--(.*?):.*', options[0], re.MULTILINE)
    return []
|
def get_charge(max_tdc, tdc_calibration_values, tdc_pixel_calibration):  # Return the charge from calibration
    '''Interpolate the TDC calibration for each pixel from 0 to max_tdc.'''
    tdc_range = np.arange(max_tdc)
    charge_calibration = np.zeros(shape=(80, 336, max_tdc))
    for column in range(80):
        for row in range(336):
            pixel_calibration = tdc_pixel_calibration[column, row, :]
            # Skip pixels with no usable calibration data.
            if not np.any(pixel_calibration != 0) or not np.any(np.isfinite(pixel_calibration)):
                continue
            # Select valid (finite) calibration steps only.
            valid = np.isfinite(pixel_calibration)
            interpolation = interp1d(x=pixel_calibration[valid], y=tdc_calibration_values[valid], kind='slinear', bounds_error=False, fill_value=0)
            charge_calibration[column, row, :] = interpolation(tdc_range)
    return charge_calibration
|
def Emulation_setNavigatorOverrides(self, platform):
    """Function path: Emulation.setNavigatorOverrides

    Overrides the value returned by the javascript navigator object.
    WARNING: marked 'Experimental' in the protocol.

    :param platform: (string) the value navigator.platform should return.
    :return: result of the synchronous protocol command (no protocol
        return value is defined).
    """
    assert isinstance(platform, (str,)), "Argument 'platform' must be of type '['str']'. Received type: '%s'" % type(platform)
    return self.synchronous_command('Emulation.setNavigatorOverrides', platform=platform)
|
def close(self):
    """Close every underlying iterator.

    Particularly important when the iterators are open files.
    """
    for iterator in getattr(self, 'iterators', ()):
        if hasattr(iterator, 'close'):
            iterator.close()
|
def delete_stream(stream_name, region=None, key=None, keyid=None, profile=None):
    '''Delete the stream with name stream_name. This cannot be undone! All data will be lost!!

    CLI example::

        salt myminion boto_kinesis.delete_stream my_stream region=us-east-1
    '''
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    response = _execute_with_retries(conn, "delete_stream", StreamName=stream_name)
    # Mark success explicitly when the retried call reported no error.
    if 'error' not in response:
        response['result'] = True
    return response
|
def _parseline(self, line):
    """Result CSV line parser.

    :param line: a line to parse
    :returns: the number of rows to jump before the next data line, None
        for a consumed header-columns line, or the error code -1
    """
    sline = line.split(SEPARATOR)
    if is_header(sline):
        return self._handle_header(sline)
    # Data is only relevant inside the RESULT TABLE section.
    if self._cur_section != SECTION_RESULT_TABLE:
        return 0
    first_cell = sline[0].lower()
    # 'Sample ID' carries the res_id of the current record.
    if first_cell == 'sample id':
        self._cur_res_id = sline[1].strip()
        return 0
    # 'Assay' carries the Analysis Service keyword.
    if first_cell == 'assay':
        self._keyword = self._format_keyword(sline[1])
        return 0
    # Results only live in the Analyte Result subsection.
    if self._cur_sub_section != SUBSECTION_ANALYTE_RESULT:
        return 0
    # First line of the Analyte Result table defines the columns.
    if self._is_header_line:
        self._is_header_line = False
        self._columns = sline
        return
    # Anything else must be a result line of the Analyte Result table.
    return self._handle_result_line(sline)
|
def set_own_module(self, path):
    """Register the program's own module tree for change monitoring.

    The calling process can thereby arrange for processing to be stopped
    and a LegionReset exception raised when any part of the tree changes.
    """
    logger = self._params.get('log', self._discard)
    self._name = path
    target = event_target(self, 'legion_reset', key=path, log=logger)
    self.module_add(target, path)
|
def _stub_attr(obj, attr_name):
    '''Stub an attribute of an object. Will return an existing stub if
    there already is one.

    Dispatches on the runtime type of the attribute (property, Mock,
    function, bound/unbound method, builtin, wrapper) to the matching
    Stub* class.
    '''
    # Annoying circular reference requires importing here. Would like to see
    # this cleaned up. @AW
    from .mock import Mock
    # Check to see if this is a property; this check is only for when dealing
    # with an instance. getattr will work for classes.
    is_property = False
    if not inspect.isclass(obj) and not inspect.ismodule(obj):
        # It's possible that the attribute is defined after initialization,
        # and so is not on the class itself.
        attr = getattr(obj.__class__, attr_name, None)
        if isinstance(attr, property):
            is_property = True
    if not is_property:
        attr = getattr(obj, attr_name)
    # Return an existing stub
    if isinstance(attr, Stub):
        return attr
    # If a Mock object, stub its __call__
    if isinstance(attr, Mock):
        return stub(attr.__call__)
    if isinstance(attr, property):
        return StubProperty(obj, attr_name)
    # Sadly, builtin functions and methods have the same type, so we have to
    # use the same stub class even though it's a bit ugly
    if inspect.ismodule(obj) and isinstance(attr, (types.FunctionType, types.BuiltinFunctionType, types.BuiltinMethodType)):
        return StubFunction(obj, attr_name)
    # In python3 unbound methods are treated as functions with no reference
    # back to the parent class and no im_* fields. We can still make unbound
    # methods work by passing these through to the stub
    if inspect.isclass(obj) and isinstance(attr, types.FunctionType):
        return StubUnboundMethod(obj, attr_name)
    # I thought that types.UnboundMethodType differentiated these cases but
    # apparently not.
    if isinstance(attr, types.MethodType):
        # Handle differently if unbound because it's an implicit "any instance"
        if getattr(attr, 'im_self', None) is None:
            # Handle the python3 case and py2 filter
            if hasattr(attr, '__self__'):
                if attr.__self__ is not None:
                    return StubMethod(obj, attr_name)
            if sys.version_info.major == 2:
                # NOTE(review): this branch passes only `attr`, while the
                # other branches pass (obj, attr_name) -- confirm that
                # StubUnboundMethod supports this single-argument form.
                return StubUnboundMethod(attr)
        else:
            return StubMethod(obj, attr_name)
    if isinstance(attr, (types.BuiltinFunctionType, types.BuiltinMethodType)):
        return StubFunction(obj, attr_name)
    # What an absurd type this is....
    if type(attr).__name__ == 'method-wrapper':
        return StubMethodWrapper(attr)
    # This is also slot_descriptor
    if type(attr).__name__ == 'wrapper_descriptor':
        return StubWrapperDescriptor(obj, attr_name)
    raise UnsupportedStub("can't stub %s(%s) of %s", attr_name, type(attr), obj)
|
def uri_path(self, path):
    """Set the Uri-Path of a request.

    :param path: the Uri-Path; anything after a '?' is stored as uri_query
    """
    parts = path.strip("/").split("?")
    # Each path segment becomes its own URI_PATH option.
    for segment in parts[0].split("/"):
        option = Option()
        option.number = defines.OptionRegistry.URI_PATH.number
        option.value = segment
        self.add_option(option)
    if len(parts) > 1:
        self.uri_query = parts[1]
|
def kalman_filter(points, noise):
    """Smooth points in place with a kalman filter.

    See https://github.com/open-city/ikalman

    Args:
        points (:obj:`list` of :obj:`Point`): points to smooth
        noise (float): expected noise
    """
    smoother = ikalman.filter(noise)
    for pt in points:
        smoother.update_velocity2d(pt.lat, pt.lon, pt.dt)
        pt.lat, pt.lon = smoother.get_lat_long()
    return points
|
def declareLegacyItem(typeName, schemaVersion, attributes, dummyBases=()):
    """Generate a dummy subclass of Item carrying the given attributes.

    The result has the base Item methods but no methods of its own; it is
    used while upgrading.

    @param typeName: a string, the Axiom TypeName to have attributes for.
    @param schemaVersion: an int, the (old) schema version this proxies.
    @param attributes: a dict mapping {columnName: attr instance}
        describing the schema of C{typeName} at C{schemaVersion}.
    @param dummyBases: a sequence of 4-tuples of (baseTypeName,
        baseSchemaVersion, baseAttributes, baseBases) representing the
        dummy bases of this legacy class.
    """
    cache_key = (typeName, schemaVersion)
    if cache_key in _legacyTypes:
        return _legacyTypes[cache_key]
    if dummyBases:
        # Recursively materialize the dummy base classes first.
        realBases = [declareLegacyItem(*baseSpec) for baseSpec in dummyBases]
    else:
        realBases = (Item,)
    attributes = attributes.copy()
    attributes.update({
        '__module__': 'item_dummy',
        '__legacy__': True,
        'typeName': typeName,
        'schemaVersion': schemaVersion,
    })
    result = type(str('DummyItem<%s,%d>' % (typeName, schemaVersion)), realBases, attributes)
    assert result is not None, 'wtf, %r' % (type,)
    _legacyTypes[cache_key] = result
    return result
|
def get_value_at(all_info, current_path, key, to_string=False):
    """Get the value located at a given path.

    :param all_info: nested data structure (dicts/lists) to walk
    :param current_path: list of path components for the current location
    :param key: dotted key expression ('this', a trailing 'id', or a.b.c)
    :param to_string: when True, return str(value) instead of the raw value
    :return: the object found at the resolved path (or its string form)
    """
    keys = key.split('.')
    if keys[-1] == 'id':
        # An 'id' leaf refers back to a component of the current path.
        target_obj = current_path[len(keys) - 1]
    else:
        if key == 'this':
            target_path = current_path
        elif '.' in key:
            # Build the target path, substituting each 'id' component with
            # the matching element of current_path.
            target_path = []
            for i, key in enumerate(keys):
                # NOTE(review): this loop rebinds `key`, clobbering the
                # original argument -- the error messages below therefore
                # print only the last component; confirm this is intended.
                if key == 'id':
                    target_path.append(current_path[i])
                else:
                    target_path.append(key)
            if len(keys) > len(current_path):
                # Append the remaining components not covered by current_path.
                target_path = target_path + keys[len(target_path):]
        else:
            # Single non-dotted key: extend a copy of the current path.
            target_path = copy.deepcopy(current_path)
            target_path.append(key)
        target_obj = all_info
        for p in target_path:
            try:
                if type(target_obj) == list and type(target_obj[0]) == dict:
                    # Lists of dicts are indexed numerically.
                    target_obj = target_obj[int(p)]
                elif type(target_obj) == list:
                    # Plain lists: the path component itself is the value.
                    target_obj = p
                elif p == '':
                    # Empty component: stay where we are.
                    target_obj = target_obj
                else:
                    try:
                        target_obj = target_obj[p]
                    except Exception as e:
                        printInfo('Info: %s\n' 'Path: %s\n' 'Key: %s' % (str(all_info), str(current_path), str(key)))
                        printException(e)
                        raise Exception
            except Exception as e:
                printInfo('Info: %s\n' 'Path: %s\n' 'Key: %s' % (str(all_info), str(current_path), str(key)))
                printException(e)
                raise Exception
    if to_string:
        return str(target_obj)
    else:
        return target_obj
|
def restore(s, t):
    """Restore black squares from `s` into the character stream `t`.

    `s` is the source string; it can contain '.'.
    `t` is the target; it is smaller than `s` by the number of '.'s in `s`.
    Each char in `s` is replaced by the corresponding char in `t`,
    jumping over '.'s in `s`.

    >>> restore('ABC.DEF', 'XYZABC')
    'XYZ.ABC'
    """
    replacement = iter(t)
    out = []
    for ch in s:
        out.append(ch if is_blacksquare(ch) else next(replacement))
    return ''.join(out)
|
def _get_http_crl_distribution_points(self, crl_distribution_points):
    """Fetch the DistributionPoint objects for non-relative, HTTP CRLs
    referenced by the certificate.

    :param crl_distribution_points:
        A CRLDistributionPoints object to grab the DistributionPoints from

    :return:
        A list of zero or more DistributionPoint objects
    """
    if crl_distribution_points is None:
        return []
    output = []
    for point in crl_distribution_points:
        name = point['distribution_point']
        if name is VOID:
            continue
        # RFC 5280 indicates a conforming CA should not use the relative form.
        if name.name == 'name_relative_to_crl_issuer':
            continue
        # This library is currently only concerned with HTTP-based CRLs.
        for general_name in name.chosen:
            if general_name.name == 'uniform_resource_identifier':
                output.append(point)
    return output
|
def _get_spacewalk_configuration(spacewalk_url=''):
    '''Return the configuration read from the master configuration
    file or directory.

    When spacewalk_url is given, only the matching server's configuration
    is returned; otherwise the first configured server wins. Returns
    False on any misconfiguration or error.
    '''
    spacewalk_config = __opts__['spacewalk'] if 'spacewalk' in __opts__ else None
    if spacewalk_config:
        try:
            for server, service_config in six.iteritems(spacewalk_config):
                username = service_config.get('username', None)
                password = service_config.get('password', None)
                protocol = service_config.get('protocol', 'https')
                if not username or not password:
                    log.error('Username or Password has not been specified in the master configuration for %s', server)
                    return False
                ret = {'api_url': '{0}://{1}/rpc/api'.format(protocol, server), 'username': username, 'password': password}
                # No URL filter, or this entry matches the requested server.
                if not spacewalk_url or spacewalk_url == server:
                    return ret
        except Exception as exc:
            log.error('Exception encountered: %s', exc)
            return False
    if spacewalk_url:
        log.error('Configuration for %s has not been specified in the master configuration', spacewalk_url)
    return False
|
def dynamical_potential(xdata, dt, order=3):
    """Compute the potential from the spring function.

    Parameters
    ----------
    xdata : ndarray
        Position data for a degree of freedom, at which to calculate
        the potential
    dt : float
        time between measurements
    order : int
        order of polynomial to fit

    Returns
    -------
    Potential : ndarray
        values of the potential at the positions in xdata
    """
    import numpy as _np
    accel = calc_acceleration(xdata, dt)
    # Differentiating twice drops two leading samples: accel[n]
    # corresponds to position[n - 2], so align by trimming xdata.
    positions = xdata[2:]
    coefficients = _np.polyfit(positions, accel, order)
    spring_force = _np.poly1d(coefficients)
    # The potential is minus the integral of the force polynomial.
    return -_np.polyint(spring_force)
|
def writeCache(self, filename):
    """Serialize the module graph to ``filename`` with pickle."""
    with open(filename, 'wb') as cache_file:
        pickle.dump(self.modules, cache_file)
|
def filter(self, u):
    """Filter the valid identities for this matcher.

    :param u: unique identity which stores the identities to filter
    :returns: a list of identities valid to work with this matcher
    :raises ValueError: when the unique identity is not an instance
        of UniqueIdentity class
    """
    if not isinstance(u, UniqueIdentity):
        raise ValueError("<u> is not an instance of UniqueIdentity")
    filtered = []
    for identity in u.identities:
        # Skip identities from sources this matcher does not handle.
        if self.sources and identity.source.lower() not in self.sources:
            continue
        if not self._check_username(identity.username):
            continue
        username = identity.username.lower()
        if username:
            filtered.append(UsernameIdentity(identity.id, identity.uuid, username))
    return filtered
|
def compute(self, base, *args, **kwargs):
    '''Return the value of the discount, capped at the computation base.

    @param base: float Computation base.
    @return: Decimal
    '''
    discount = super(Discount, self).compute(base, *args, **kwargs)
    # A discount can never exceed the base it applies to.
    return min(base, discount)
|
def check(text):
    """Suggest the preferred English forms for common Latin phrases.

    :param text: the text to check
    :return: result of preferred_forms_check over the phrase table
    """
    err = "pinker.latin"
    msg = "Use English. '{}' is the preferred form."
    # Renamed from `list` to avoid shadowing the builtin.
    preferred = [
        ["other things being equal", ["ceteris paribus"]],
        ["among other things", ["inter alia"]],
        ["in and of itself", ["simpliciter"]],
        ["having made the necessary changes", ["mutatis mutandis"]],
    ]
    return preferred_forms_check(text, preferred, err, msg)
|
def steady_state_replacement(random, population, parents, offspring, args):
    """Perform steady-state replacement for the offspring.

    The offspring unconditionally replace the least fit individuals in the
    existing population, even when the offspring are less fit than the
    individuals they replace.

    .. Arguments:
       random -- the random number generator object
       population -- the population of individuals
       parents -- the list of parent individuals
       offspring -- the list of offspring individuals
       args -- a dictionary of keyword arguments
    """
    # Sorting puts the least fit individuals first.
    population.sort()
    limit = min(len(offspring), len(population))
    for slot in range(limit):
        population[slot] = offspring[slot]
    return population
|
def parse_form_data(environ, stream_factory=None, charset='utf-8', errors='replace', max_form_memory_size=None, max_content_length=None, cls=None, silent=True):
    """Parse the form data in the environ and return it as the tuple
    ``(stream, form, files)``. Only call this when the transport method is
    `POST`, `PUT`, or `PATCH`.

    If the mimetype of the transmitted data is `multipart/form-data`, the
    files multidict is filled with `FileStorage` objects. If the mimetype
    is unknown, the input stream is wrapped and returned as the first
    tuple element; otherwise the stream is empty.

    This is a shortcut for the common usage of :class:`FormDataParser`;
    see :ref:`dealing-with-request-data` for details.

    .. versionadded:: 0.5
       The `max_form_memory_size`, `max_content_length` and `cls`
       parameters were added.
    .. versionadded:: 0.5.1
       The optional `silent` flag was added.

    :param environ: the WSGI environment to be used for parsing.
    :param stream_factory: An optional callable returning a new read/write
        file descriptor, working like
        :meth:`~BaseResponse._get_file_stream`.
    :param charset: The character set for URL and url encoded form data.
    :param errors: The encoding error behavior.
    :param max_form_memory_size: maximum number of bytes accepted for
        in-memory stored form data; exceeding it raises
        :exc:`~exceptions.RequestEntityTooLarge`.
    :param max_content_length: if provided and the transmitted data is
        longer, :exc:`~exceptions.RequestEntityTooLarge` is raised.
    :param cls: an optional dict class to use; defaults to
        :class:`MultiDict` when `None`.
    :param silent: If set to False parsing errors will not be caught.
    :return: A tuple in the form ``(stream, form, files)``.
    """
    parser = FormDataParser(stream_factory, charset, errors, max_form_memory_size, max_content_length, cls, silent)
    return parser.parse_from_environ(environ)
|
def build(self, X, Y, w=None, edges=None):
    """Assign data to this object and build the requested topological
    structure.

    @ In, X, an m-by-n array of values specifying m n-dimensional samples
    @ In, Y, an m vector of output responses corresponding to the m
        samples specified by X
    @ In, w, an optional m vector of weights associated to each of the m
        samples; None (default) means all points are equally weighted
    @ In, edges, an optional list of custom edges to use as a starting
        point for pruning, or in place of a computed graph
    """
    self.reset()
    if X is None or Y is None:
        return
    self.__set_data(X, Y, w)
    if self.debug:
        sys.stdout.write("Graph Preparation: ")
        # time.clock() was removed in Python 3.8; perf_counter() is the
        # documented replacement for interval timing.
        start = time.perf_counter()
    self.graph_rep = nglpy.Graph(self.Xnorm, self.graph, self.max_neighbors, self.beta, connect=self.connect)
    if self.debug:
        end = time.perf_counter()
        sys.stdout.write("%f s\n" % (end - start))
|
def retrieve(self, request, project, pk=None):
    """Retrieve a bug-job-map entry.

    pk is a composite key; the first component is parsed as job_id and
    the second as bug_id.
    """
    job_id, bug_id = (int(part) for part in pk.split("-"))
    job = Job.objects.get(repository__name=project, id=job_id)
    try:
        mapping = BugJobMap.objects.get(job=job, bug_id=bug_id)
    except BugJobMap.DoesNotExist:
        return Response("Object not found", status=HTTP_404_NOT_FOUND)
    return Response(BugJobMapSerializer(mapping).data)
|
def _get_zone_name(self):
    """Query and cache the receiver's zone name if not set yet."""
    if self._name is not None:
        return
    # Collect tags for the AppCommand.xml call and execute it.
    root = self.exec_appcommand_post(["GetZoneName"])
    if root is None:
        _LOGGER.error("Getting ZoneName failed.")
        return
    zone = self._get_own_zone()
    try:
        name = root.find("./cmd/{zone}".format(zone=zone)).text
    except AttributeError:
        # find() returned None -- no entry for this zone in the response.
        _LOGGER.error("No ZoneName found for zone %s", self.zone)
    else:
        self._name = name.strip()
|
def feature_from_line(line, dialect=None, strict=True, keep_order=False):
    """Given a line from a GFF file, return a Feature object.

    Parameters
    ----------
    line : string
    strict : bool
        If True (default), assume `line` is a single, tab-delimited string
        with at least 9 fields.
        If False, the input may be more flexible: `line` can be a
        multi-line string (as long as exactly one line is non-empty) and,
        for the standard 9 GFF/GTF fields, spaces may separate fields
        instead of tabs. With more than 9 fields, tabs are still required.
    keep_order, dialect
        Passed directly to :class:`Feature`; see that class's docstring.

    Returns
    -------
    A new :class:`Feature` object.
    """
    if not strict:
        # Collapse a possibly multi-line input down to its single
        # non-empty line.
        candidates = [ln.strip() for ln in line.splitlines(False) if ln.strip()]
        assert len(candidates) == 1, candidates
        line = candidates[0]
        if '\t' in line:
            fields = line.rstrip('\n\r').split('\t')
        else:
            fields = line.rstrip('\n\r').split(None, 8)
    else:
        fields = line.rstrip('\n\r').split('\t')
    try:
        attr_string = fields[8]
    except IndexError:
        attr_string = ""
    attrs, inferred_dialect = parser._split_keyvals(attr_string, dialect=dialect)
    kwargs = dict(zip(constants._gffkeys, fields))
    kwargs['attributes'] = attrs
    kwargs['extra'] = fields[9:]
    kwargs['keep_order'] = keep_order
    if dialect is None:
        dialect = inferred_dialect
    return Feature(dialect=dialect, **kwargs)
|
def WriteIntermediateInit(self, out):
    """Write a simple __init__.py for an intermediate package directory."""
    emit = self._GetPrinter(out)
    emit('#!/usr/bin/env python')
    emit('"""Shared __init__.py for apitools."""')
    emit()
    emit('from pkgutil import extend_path')
    emit('__path__ = extend_path(__path__, __name__)')
|
def gpib_command(library, session, data):
    """Write GPIB command bytes on the bus.

    Corresponds to the viGpibCommand function of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param data: data to write.
    :type data: bytes
    :return: Number of written bytes, return value of the library call.
    :rtype: int, :class:`pyvisa.constants.StatusCode`
    """
    written = ViUInt32()
    # ctypes signature: [ViSession, ViBuf, ViUInt32, ViPUInt32]
    status = library.viGpibCommand(session, data, len(data), byref(written))
    return written.value, status
|
def instruction_LSR_memory(self, opcode, ea, m):
    """Logical shift right of a memory location.

    Returns the effective address and the shifted value masked to 8 bits
    so the caller can write it back.
    """
    result = self.LSR(m)
    return ea, result & 0xff
|
def _monitor_task(self):
    """Wrapper that handles the actual asynchronous monitoring of the task
    state.

    Re-schedules itself via the reactor while the task is unready, then
    fires the stored callback/errback exactly once on a terminal state.
    """
    # NOTE: self.task.state is read fresh at each check below; it is
    # presumably a property querying the result backend -- do not cache it.
    if self.task.state in states.UNREADY_STATES:
        # Not finished yet: poll again after POLL_PERIOD.
        reactor.callLater(self.POLL_PERIOD, self._monitor_task)
        return
    if self.task.state == 'SUCCESS':
        self.callback(self.task.result)
    elif self.task.state == 'FAILURE':
        # Wrap the failure result for the errback.
        self.errback(Failure(self.task.result))
    elif self.task.state == 'REVOKED':
        self.errback(Failure(defer.CancelledError('Task {0}'.format(self.task.id))))
    else:
        # Unknown terminal state -- surface it as an error.
        self.errback(ValueError('Cannot respond to `{}` state'.format(self.task.state)))
|
def get_post_alter_table_index_foreign_key_sql(self, diff):
    """Build CREATE INDEX statements to re-apply after an ALTER TABLE.

    :param diff: The table diff
    :type diff: orator.dbal.table_diff.TableDiff

    :rtype: list
    """
    if not isinstance(diff.from_table, Table):
        raise DBALException("Sqlite platform requires for alter table the table" "diff with reference to original table schema")
    table_name = diff.get_new_name() if diff.new_name else diff.get_name(self)
    sql = []
    for index in self._get_indexes_in_altered_table(diff).values():
        # Primary keys are recreated with the table itself.
        if index.is_primary():
            continue
        sql.append(self.get_create_index_sql(index, table_name.get_quoted_name(self)))
    return sql
|
def substitute_group(self, index, func_grp, strategy, bond_order=1, graph_dict=None, strategy_params=None, reorder=True, extend_structure=True):
    """Builds off of Molecule.substitute to replace an atom in self.molecule
    with a functional group. This method also amends self.graph to
    incorporate the new functional group.

    NOTE: using a MoleculeGraph will generally produce a different graph
    compared with using a Molecule or str (when not using graph_dict).
    This is because of the reordering that occurs when using some of the
    local_env strategies.

    :param index: Index of atom to substitute.
    :param func_grp: Substituent molecule. There are three options:
        1. Providing an actual molecule as the input. The first atom must
           be a DummySpecie X, indicating the position of the nearest
           neighbor. The second atom must be the next nearest atom. For
           example, for a methyl group substitution, func_grp should be
           X-CH3, where X is the first site and C is the second site.
           The code removes the index site and connects the nearest
           neighbor to the C atom in CH3; the X-C bond indicates the
           directionality for connecting the atoms.
        2. A string name. The molecule will be obtained from the relevant
           template in func_groups.json.
        3. A MoleculeGraph object.
    :param strategy: Class from pymatgen.analysis.local_env.
    :param bond_order: A specified bond order to calculate the bond length
        between the attached functional group and the nearest neighbor
        site. Defaults to 1.
    :param graph_dict: Dictionary representing the bonds of the functional
        group (format: {(u, v): props}, where props is a dictionary of
        properties, including weight). If None, the algorithm will attempt
        to determine bonds automatically using one of the strategies
        defined in pymatgen.analysis.local_env.
    :param strategy_params: dictionary of keyword arguments for strategy.
        If None, default parameters will be used.
    :param reorder: bool, whether graph nodes need to be reordered
        following the application of the local_env strategy.
    :param extend_structure: If True (default), a large artificial box is
        placed around the Molecule, because some strategies assume
        periodic boundary conditions.
    :return:
    """
    def map_indices(grp):
        # Map functional-group atom indices to their new indices at the
        # end of self.molecule (the group is appended after substitution).
        grp_map = {}
        # Get indices now occupied by functional group.
        # Subtracting 1 because the dummy atom X should not count.
        atoms = len(grp) - 1
        offset = len(self.molecule) - atoms
        for i in range(atoms):
            grp_map[i] = i + offset
        return grp_map
    # Work is simplified if a graph is already in place.
    if isinstance(func_grp, MoleculeGraph):
        self.molecule.substitute(index, func_grp.molecule, bond_order=bond_order)
        mapping = map_indices(func_grp.molecule)
        for (u, v) in list(func_grp.graph.edges()):
            edge_props = func_grp.graph.get_edge_data(u, v)[0]
            weight = None
            if "weight" in edge_props.keys():
                weight = edge_props["weight"]
                del edge_props["weight"]
            self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props)
    else:
        if isinstance(func_grp, Molecule):
            # Copy so removing the dummy atom below cannot mutate the
            # caller's molecule.
            func_grp = copy.deepcopy(func_grp)
        else:
            try:
                func_grp = copy.deepcopy(FunctionalGroups[func_grp])
            except:
                raise RuntimeError("Can't find functional group in list. " "Provide explicit coordinate instead")
        self.molecule.substitute(index, func_grp, bond_order=bond_order)
        mapping = map_indices(func_grp)
        # Remove dummy atom "X"
        func_grp.remove_species("X")
        if graph_dict is not None:
            for (u, v) in graph_dict.keys():
                edge_props = graph_dict[(u, v)]
                # NOTE(review): `weight` is not reset each iteration here;
                # if 'weight' is missing from edge_props, the previous
                # iteration's value is reused (NameError on the first
                # iteration) -- confirm callers always supply 'weight'.
                if "weight" in edge_props.keys():
                    weight = edge_props["weight"]
                    del edge_props["weight"]
                self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props)
        else:
            if strategy_params is None:
                strategy_params = {}
            strat = strategy(**strategy_params)
            graph = self.with_local_env_strategy(func_grp, strat, reorder=reorder, extend_structure=extend_structure)
            for (u, v) in list(graph.graph.edges()):
                edge_props = graph.graph.get_edge_data(u, v)[0]
                weight = None
                if "weight" in edge_props.keys():
                    weight = edge_props["weight"]
                    del edge_props["weight"]
                if 0 not in list(graph.graph.nodes()):
                    # If graph indices have different (1-based) indexing,
                    # shift them back to 0-based before mapping.
                    u, v = (u - 1), (v - 1)
                self.add_edge(mapping[u], mapping[v], weight=weight, edge_properties=edge_props)
|
def get_potential_files(self, ignore_list):
    """Get a listing of files for the appropriate task which may or may
    not be locked and/or done.

    :param ignore_list: filenames to exclude from the result.
    :return: list of candidate filenames; for the TRACK task only the
        highest-numbered version of each basename is kept.
    """
    # For the REALS task, exclude fake ('fk') detections from the listing.
    exclude_prefix = self.taskid == tasks.suffixes.get(tasks.REALS_TASK, '') and 'fk' or None
    filenames = [filename for filename in self.directory_context.get_listing(self.taskid, exclude_prefix=exclude_prefix)
                 if filename not in ignore_list and filename not in self._done and filename not in self._already_fetched]
    # if the extension is .mpc. then we look for the largest numbered MPC file.
    # look for the largest numbered MPC file only.
    if self.taskid == tasks.suffixes.get(tasks.TRACK_TASK, ''):
        basenames = {}
        for filename in filenames:
            fullname = os.path.splitext(filename)[0]
            if fullname in basenames:
                continue
            basename = os.path.splitext(fullname)[0]
            # only do the 'maximum' search when the 2nd extension is an integer value
            try:
                idx = int(filename.split('.')[-2])
                if idx > basenames.get(basename, 0):
                    basenames[basename] = idx
            except (ValueError, IndexError, TypeError):
                # Narrowed from a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit). ValueError: non-integer
                # version component; IndexError: no second extension;
                # TypeError: comparing an int against a previously stored
                # '' placeholder. In all cases, keep the file as-is.
                basenames[fullname] = ''
        filenames = []
        for basename in basenames:
            # sometimes the version is empty, so no '.' is needed
            version = basenames[basename]
            version = len(str(version)) > 0 and ".{}".format(version) or version
            filenames.append("{}{}{}".format(basename, version, self.taskid))
    return filenames
|
def get_doc(self, objtxt):
    """Evaluate *objtxt* and return the documentation dictionary of the
    resulting object, or None when the expression is not valid."""
    evaluated, is_valid = self._eval(objtxt)
    if not is_valid:
        return None
    return getdoc(evaluated)
|
def find_child(sexpr: Sexpr, *tags: str) -> Optional[Sexpr]:
    """Search for a tag among direct children of the s-expression.

    Returns the first child s-expression whose head is one of *tags*,
    or None when no such child exists.
    """
    _assert_valid_sexpr(sexpr)
    matches = (item for item in sexpr[1:] if _is_sexpr(item) and item[0] in tags)
    return next(matches, None)
|
def parent(self, index):
    """Return the parent of the model item with the given index.

    An invalid QModelIndex is returned when *index* itself is invalid
    or when the item's parent is the root item.
    """
    if not index.isValid():
        return QtCore.QModelIndex()
    item = index.internalPointer()
    # the only place where the parent item is queried
    parent_item = item.getParent()
    if parent_item == self.root:
        return QtCore.QModelIndex()
    return self.createIndex(parent_item.row(), 0, parent_item)
|
def split(s, delimter, trim=True, limit=0):  # pragma: no cover
    """Split a string on a single-character delimiter, ignoring
    delimiters that appear inside quotes, brackets, or after a
    backslash escape.

    @params:
        `s`: the string
        `delimter`: the single-character delimiter (sic)
        `trim`: whether to strip whitespace from each part. Default: True
        `limit`: maximum number of splits to perform (0 = unlimited)
    @examples:
        ```python
        ret = split("'a,b',c", ",")
        # ret == ["'a,b'", "c"]
        # ',' inside quotes will be recognized.
        ```
    @returns:
        The list of substrings
    """
    parts = []
    openers = '([{'
    closers = ')]}'
    quote_chars = '\'"'
    # Open-bracket depth per bracket kind; unbalanced closers go negative,
    # which still counts as "inside brackets" (truthy), as before.
    depth = [0, 0, 0]
    in_quote = [False, False]
    escaped = False
    seg_start = 0
    nsplits = 0
    for pos, ch in enumerate(s):
        if ch == '\\':
            # a backslash toggles escaping (so '\\\\' escapes itself)
            escaped = not escaped
        elif escaped:
            # this character was escaped; it is never special
            escaped = False
        else:
            if ch in openers:
                depth[openers.index(ch)] += 1
            elif ch in closers:
                depth[closers.index(ch)] -= 1
            elif ch in quote_chars:
                which = quote_chars.index(ch)
                in_quote[which] = not in_quote[which]
            elif ch == delimter and not any(depth) and not any(in_quote):
                piece = s[seg_start:pos]
                parts.append(piece.strip() if trim else piece)
                seg_start = pos + 1
                nsplits += 1
                if limit and nsplits >= limit:
                    break
    tail = s[seg_start:]
    parts.append(tail.strip() if trim else tail)
    return parts
|
def _set_bfd_l3(self, v, load=False):
    """Setter method for bfd_l3, mapped from YANG variable
    /hardware/custom_profile/kap_custom_profile/bfd_l3 (container).

    If this variable is read-only (config: false) in the source YANG
    file, then _set_bfd_l3 is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_bfd_l3() directly.

    :param v: value to assign; coerced into the generated bfd_l3 container.
    :param load: accepted for generated-setter API compatibility
        (not referenced in this body).
    :raises ValueError: when *v* cannot be coerced into the container type.
    """
    # Unwrap values that carry their own YANG type converter.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Coerce into the generated container class; TypeError/ValueError
        # indicate an incompatible value.
        t = YANGDynClass(v, base=bfd_l3.bfd_l3, is_container='container', presence=False, yang_name="bfd-l3", rest_name="bfd-l3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure BFD-L3 protocol KAP parameters', u'cli-compact-syntax': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Re-raise with a structured error payload describing the expected type.
        raise ValueError({'error-string': """bfd_l3 must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=bfd_l3.bfd_l3, is_container='container', presence=False, yang_name="bfd-l3", rest_name="bfd-l3", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure BFD-L3 protocol KAP parameters', u'cli-compact-syntax': None}}, namespace='urn:brocade.com:mgmt:brocade-hardware', defining_module='brocade-hardware', yang_type='container', is_config=True)""", })
    self.__bfd_l3 = t
    # Notify the parent object of the change, if it supports change tracking.
    if hasattr(self, '_set'):
        self._set()
|
def render(file):
    """Pretty print the XML file for rendering.

    Detects the file's encoding, decodes its contents, and returns the
    XML re-serialized with two-space indentation.
    """
    with file.open() as handle:
        charset = detect_encoding(handle, default='utf-8')
        raw_text = handle.read().decode(charset)
    document = xml.dom.minidom.parseString(raw_text)
    return document.toprettyxml(indent=' ', newl='')
|
def _handle_wrapper(self):
    """The helper method for handling JSON Post and Get requests.

    Verifies the X-Requested-With header, invokes ``self.handle()``, and
    serializes ``self.json_response`` into the response body. Handler
    errors are reported to the client as ``error_class``/``error_message``
    entries instead of propagating.
    """
    # The X-Requested-With header cannot be set by a plain cross-site form
    # post, so requiring it acts as a lightweight CSRF guard.
    if self.request.headers.get("X-Requested-With") != "XMLHttpRequest":
        logging.error("Got JSON request with no X-Requested-With header")
        self.response.set_status(403, message="Got JSON request with no X-Requested-With header")
        return
    self.json_response.clear()
    try:
        self.handle()
    except errors.MissingYamlError:
        logging.debug("Could not find 'mapreduce.yaml' file.")
        self.json_response.clear()
        self.json_response["error_class"] = "Notice"
        self.json_response["error_message"] = "Could not find 'mapreduce.yaml'"
    # pylint: disable=broad-except
    # NOTE: `except Exception, e` is Python-2-only syntax; `as` is valid on
    # Python 2.6+ and required on Python 3.
    except Exception as e:
        logging.exception("Error in JsonHandler, returning exception.")
        # TODO(user): Include full traceback here for the end-user.
        self.json_response.clear()
        self.json_response["error_class"] = e.__class__.__name__
        self.json_response["error_message"] = str(e)
    self.response.headers["Content-Type"] = "text/javascript"
    try:
        output = json.dumps(self.json_response, cls=json_util.JsonEncoder)
    # pylint: disable=broad-except
    except Exception:
        logging.exception("Could not serialize to JSON")
        self.response.set_status(500, message="Could not serialize to JSON")
        return
    else:
        self.response.out.write(output)
|
def delete_message_from_handle(self, queue, receipt_handle):
    """Delete a message from a queue, given a receipt handle.

    :type queue: A :class:`boto.sqs.queue.Queue` object
    :param queue: The Queue from which messages are read.

    :type receipt_handle: str
    :param receipt_handle: The receipt handle for the message

    :rtype: bool
    :return: True if successful, False otherwise.
    """
    request_params = {'ReceiptHandle': receipt_handle}
    return self.get_status('DeleteMessage', request_params, queue.id)
|
def add_redistribution(self, protocol, route_map_name=None):
    """Adds a protocol redistribution to OSPF

    Args:
        protocol (str): protocol to redistribute
        route_map_name (str): route-map to be used to
            filter the protocols

    Returns:
        bool: True if the command completes successfully

    Raises:
        ValueError: raised if the protocol passed is not one
            of the following: [rip, bgp, static, connected]
    """
    protocols = ['bgp', 'rip', 'static', 'connected']
    if protocol not in protocols:
        # Fix: the two adjacent string literals were previously joined
        # without a space, producing the message "...must bebgp, ...".
        raise ValueError('redistributed protocol must be '
                         'bgp, connected, rip or static')
    if route_map_name is None:
        cmd = 'redistribute {}'.format(protocol)
    else:
        cmd = 'redistribute {} route-map {}'.format(protocol, route_map_name)
    return self.configure_ospf(cmd)
|
def _from_dict ( cls , _dict ) :
"""Initialize a QueryEntitiesResponse object from a json dictionary ."""
|
args = { }
if 'entities' in _dict :
args [ 'entities' ] = [ QueryEntitiesResponseItem . _from_dict ( x ) for x in ( _dict . get ( 'entities' ) ) ]
return cls ( ** args )
|
def main(args=None):
    """Decodes bencoded files to python syntax (like JSON, but with bytes support)"""
    import sys, pprint
    from argparse import ArgumentParser, FileType
    arg_parser = ArgumentParser(description=main.__doc__)
    arg_parser.add_argument('infile', nargs='?', type=FileType('rb'), default=sys.stdin.buffer, help='bencoded file (e.g. torrent) [Default: stdin]')
    arg_parser.add_argument('outfile', nargs='?', type=FileType('w'), default=sys.stdout, help='python-syntax serialization [Default: stdout]')
    namespace = arg_parser.parse_args(args)
    decoded = bdecode(namespace.infile)
    pprint.pprint(decoded, stream=namespace.outfile)
|
def get_symbol_ids(symbol_yml_file, metadata):
    """Get a list of ids which describe which class they get mapped to.

    Parameters
    ----------
    symbol_yml_file : string
        Path to a YAML file.
    metadata : dict
        Metainformation of symbols, like the id on write-math.com.
        Has keys 'symbols', 'tags', 'tags2symbols'.

    Returns
    -------
    list of dictionaries :
        Each dictionary represents one output class and has the keys
        'id' (which is an id on write-math.com) and 'mappings' (which is
        a list of ids on write-math.com). The mappings list should at
        least contain the id itself, but can contain more.

    Examples
    --------
    >>> get_symbol_ids('symbols.yml')
    [{'id': 42, 'mappings': [1, 42, 456, 1337]}, {'id': 2, 'mappings': [2]}]

    The yml file has to be of the structure::

        - {latex: 'A'}
        - {latex: 'B'}
        - {latex: 'O',
           mappings: ['0', 'o']}
        - {latex: 'C'}
        - {latex: '::REJECT::',
           mappings: ['::ALL_FREE::']}
        - {latex: '::ARROW::',
           mappings: ['::TAG/arrow::'],
           exclude: ['\\rightarrow']}
    """
    with open(symbol_yml_file, 'r') as stream:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary Python from the YAML file; prefer yaml.safe_load if
        # the file may come from an untrusted source.
        symbol_cfg = yaml.load(stream)
    symbol_ids = []
    # Set of all write-math ids claimed so far, used to detect an id being
    # mapped to two different output classes.
    symbol_ids_set = set()
    for symbol in symbol_cfg:
        if 'latex' not in symbol:
            logging.error("Key 'latex' not found for a symbol in %s (%s)", symbol_yml_file, symbol)
            sys.exit(-1)
        # Look up the metadata entry whose LaTeX matches this symbol.
        results = [el for el in metadata['symbols'] if el['formula_in_latex'] == symbol['latex']]
        if len(results) != 1:
            logging.warning("Found %i results for %s: %s", len(results), symbol['latex'], results)
            if len(results) > 1:
                # Ambiguous match: fall back to the entry with the lowest id.
                results = sorted(results, key=lambda n: n['id'])
            else:
                # No match at all is fatal.
                sys.exit(-1)
        mapping_ids = [results[0]['id']]
        if 'mappings' in symbol:
            for msymbol in symbol['mappings']:
                filtered = [el for el in metadata['symbols'] if el['formula_in_latex'] == msymbol['latex']]
                if len(filtered) != 1:
                    logging.error("Found %i results for %s: %s", len(filtered), msymbol, filtered)
                    if len(filtered) > 1:
                        filtered = natsorted(filtered, key=lambda n: n['id'])
                    else:
                        sys.exit(-1)
                mapping_ids.append(filtered[0]['id'])
        symbol_ids.append({'id': int(results[0]['id']), 'formula_in_latex': results[0]['formula_in_latex'], 'mappings': mapping_ids})
        # Abort if any mapped id was already claimed by an earlier class.
        for id_tmp in mapping_ids:
            if id_tmp not in symbol_ids_set:
                symbol_ids_set.add(id_tmp)
            else:
                # Find the earlier class that claimed this id, for the error message.
                for symbol_tmp in symbol_ids:
                    if id_tmp in symbol_tmp['mappings']:
                        break
                logging.error('Symbol id %s is already used: %s', id_tmp, symbol_tmp)
                sys.exit(-1)
    # print(metadata.keys())
    # for el in metadata:
    #     print(metadata[el][0].keys())
    # TODO: assert no double mappings
    # TODO: Support for
    #   - ::ALL_FREE:: - meaning the rest of all ids which are not assigned to
    #     any other class get assigned to this class
    #   - ::TAG/arrow:: - meaning all ids of the tag arrow get assigned here
    #   - exclude
    logging.info('%i base classes and %i write-math ids.', len(symbol_ids), len(symbol_ids_set))
    return symbol_ids
|
def cublasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc):
    """Matrix-matrix product for real general matrix.

    Thin ctypes wrapper over cuBLAS ``cublasSgemm_v2`` (single precision);
    per the cuBLAS API this computes C = alpha*op(A)*op(B) + beta*C.
    ``A``, ``B`` and ``C`` are device pointers passed as integers;
    ``transa``/``transb`` are translated through the ``_CUBLAS_OP`` map;
    ``lda``/``ldb``/``ldc`` are the leading dimensions. A non-success
    status raises via ``cublasCheckStatus``.
    """
    # alpha/beta are passed by reference as host-side c_float values
    # (host pointer mode).
    status = _libcublas.cublasSgemm_v2(handle, _CUBLAS_OP[transa], _CUBLAS_OP[transb], m, n, k, ctypes.byref(ctypes.c_float(alpha)), int(A), lda, int(B), ldb, ctypes.byref(ctypes.c_float(beta)), int(C), ldc)
    cublasCheckStatus(status)
|
def get_qubits(self, indices=True):
    """Return the set of qubits used by this program's gate applications
    and measurements.

    e.g.
        >>> p = Program()
        >>> p.inst(("H", 1))
        >>> p.get_qubits()
        >>> q = p.alloc()
        >>> p.inst(H(q))
        >>> len(p.get_qubits())

    :param indices: Return qubit indices as integers instead of the
        wrapping :py:class:`Qubit` object
    :return: A set of all the qubit indices used in this program
    :rtype: set
    """
    used_qubits = set()
    for instruction in self.instructions:
        if isinstance(instruction, (Gate, Measurement)):
            used_qubits |= instruction.get_qubits(indices=indices)
    return used_qubits
|
def to_snake_case(name):
    """Convert the input string to snake-cased string.

    Relies on the module-level compiled regexes ``first_cap_re`` and
    ``all_cap_re`` — presumably the usual CamelCase-splitting pair
    (word-start capitals, then remaining lowercase/uppercase boundaries);
    TODO confirm their definitions.
    """
    # First pass: insert '_' at word-start capital boundaries.
    s1 = first_cap_re.sub(r'\1_\2', name)
    # handle acronym words
    return all_cap_re.sub(r'\1_\2', s1).lower()
|
def n_list_comp(self, node):
    """List comprehensions.

    Emits source text ``[ <body> <for/if parts> ]`` for a list
    comprehension parse node, recording position info for the opening
    bracket when the node starts with a BUILD_LIST instruction.
    """
    # Save and tighten operator precedence while rendering the body.
    p = self.prec
    self.prec = 27
    n = node[-1]
    assert n == 'list_iter'
    # find innermost node
    while n == 'list_iter':
        n = n[0]
        # recurse one step
        if n == 'list_for':
            n = n[3]
        elif n == 'list_if':
            n = n[2]
        elif n == 'list_if_not':
            n = n[2]
    assert n == 'lc_body'
    if node[0].kind.startswith('BUILD_LIST'):
        # Record the text position of the opening '[' for this node.
        start = len(self.f.getvalue())
        self.set_pos_info(node[0], start, start + 1)
    self.write('[ ')
    # lc_body
    self.preorder(n[0])
    # for/if parts
    self.preorder(node[-1])
    self.write(' ]')
    # Restore the caller's precedence and stop descending into children.
    self.prec = p
    self.prune()
|
def load_msgpack(blob, **kwargs):
    """Load a dict packed with msgpack into kwargs for
    a Trimesh constructor.

    Parameters
    ----------
    blob : bytes or file-like
        msgpack packed dict containing
        keys 'vertices' and 'faces'

    Returns
    -------
    loaded : dict
        Keyword args for Trimesh constructor, aka
        mesh = trimesh.Trimesh(**loaded)
    """
    import msgpack
    # file-like objects are streamed; raw bytes are decoded directly
    unpack = msgpack.load if hasattr(blob, 'read') else msgpack.loads
    return load_dict(unpack(blob))
|
def pub_date(soup):
    """Return the publishing date in struct format.

    Tries date_type="pub" first and falls back to
    date_type="publication"; returns None when neither is present.
    """
    raw_date = first(raw_parser.pub_date(soup, date_type="pub"))
    if raw_date is None:
        raw_date = first(raw_parser.pub_date(soup, date_type="publication"))
        if raw_date is None:
            return None
    day, month, year = ymd(raw_date)
    return date_struct(year, month, day)
|
def remove(self, tag, nth=1):
    """Remove the n-th occurrence of tag in this message.

    :param tag: FIX field tag number to be removed.
    :param nth: Index of tag if repeating, first is 1.
    :returns: Value of the field if removed, None otherwise.
    """
    target = fix_tag(tag)
    remaining = int(nth)
    for position, (field_tag, field_value) in enumerate(self.pairs):
        if field_tag == target:
            remaining -= 1
            if remaining == 0:
                # Safe to mutate here: we return immediately after.
                del self.pairs[position]
                return field_value
    return None
|
def _generate_notebooks_by_category(notebook_object, dict_by_tag):
    """Internal function that is used for generation of the page
    "Notebooks by Category".

    Parameters
    ----------
    notebook_object : notebook object
        Object of "notebook" class where the body will be created.
    dict_by_tag : dict
        Dictionary where each key is a tag and the respective value will
        be a list containing the Notebooks (title and filename) that
        include this tag.
    """
    # ===== Insertion of an opening text =====
    markdown_cell = OPEN_IMAGE
    # == Generation of a table that group Notebooks by category the information about each signal ==
    category_list = list(NOTEBOOK_KEYS.keys())
    tag_keys = list(dict_by_tag.keys())
    # Table header row.
    markdown_cell += """\n<table id="notebook_list" width="100%">
<tr>
<td width="20%" class="center_cell group_by_header_grey"> Category </td>
<td width="60%" class="center_cell group_by_header"></td>
<td width="20%" class="center_cell"></td>
</tr>"""
    for i, category in enumerate(category_list):
        if category != "MainFiles":
            # Only emit a section for categories that actually have notebooks.
            if category.lower() in tag_keys:
                if i == 0:
                    first_border = "color1_top"
                else:
                    first_border = ""
                nbr_notebooks = len(dict_by_tag[category.lower()])
                # Category header row; rowspan covers the category's notebook rows.
                markdown_cell += "\n\t<tr>" "\n\t\t<td rowspan='" + str(nbr_notebooks + 1) + "' class='center_cell open_cell_border_" + str(NOTEBOOK_KEYS[category]) + "'><span style='float:center'><img src='../../images/icons/" + category + ".png' class='icon' style='vertical-align:middle'></span> <span style='float:center' class='color" + str(NOTEBOOK_KEYS[category]) + "'>" + category + "</span></td>" "\n\t\t<td class='center_cell color" + str(NOTEBOOK_KEYS[category]) + "_cell " + first_border + "'><span style='float:center'>" + category + "</span></td>" "\n\t\t<td class='center_cell gradient_color" + str(NOTEBOOK_KEYS[category]) + "'></td>" "\n\t</tr>"
                notebook_list = dict_by_tag[category.lower()]
                for j, notebook_file in enumerate(notebook_list):
                    if j == len(notebook_list) - 1:
                        last_border = "class='border_cell_bottom_white'"
                    else:
                        last_border = ""
                    # Entries are "<filename>&<title>" with OS-specific separators.
                    split_path = notebook_file.replace("\\", "/").split("/")
                    notebook_name = split_path[-1].split("&")[0]
                    notebook_title = split_path[-1].split("&")[1]
                    # One row per notebook, linking to the notebook file.
                    markdown_cell += "\n\t<tr " + last_border + ">" "\n\t\t<td class='center_cell open_cell_light'> <a href='../" + category + "/" + notebook_name + "'>" + notebook_title + "</a> </td>" "\n\t\t<td class='center_cell'> <a href='../" + category + "/" + notebook_name + "'><div class='file_icon'></div></a> </td>" "\n\t</tr>"
    markdown_cell += "\n</table>"
    # ===== Insertion of an introductory text =====
    markdown_cell += DESCRIPTION_CATEGORY
    # ===== Insertion of the HTML table inside a markdown cell =====
    notebook_object["cells"].append(nb.v4.new_markdown_cell(markdown_cell))
|
def create(self, url, pathToWebarchive):
    """*create the webarchive object*

    **Key Arguments:**
        - ``url`` -- the url of the webpage to generate the webarchive for
        - ``pathToWebarchive`` -- the path to output the webarchive file to

    **Return:**
        - ``webarchive`` -- the path to the webarchive (or -1 if the generation fails)

    **Usage:**
        See class docstring for usage
    """
    self.log.debug('starting the ``create`` method')
    from subprocess import Popen, PIPE
    webarchiver = self.settings["executables"]["webarchiver"]
    # Security fix: build the command as an argv list with shell=False.
    # The previous shell=True string interpolation allowed shell
    # metacharacters in `url` to be executed (command injection) and
    # broke on unusual output paths.
    cmd = [webarchiver, "-url", url, "-output", pathToWebarchive]
    p = Popen(cmd, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()
    self.log.debug('output: %(stdout)s' % locals())
    # The tool reports failure via stderr output rather than exit code here.
    if len(stderr) == 0:
        webarchive = pathToWebarchive
    else:
        self.log.error("Could not generate the webarchive for this webpage: %(url)s. %(stderr)s " % locals())
        return -1
    self.log.debug('completed the ``create`` method')
    return webarchive
|
def RCScan():
    """Return a prototype Scanner instance for scanning RC source files."""
    # Fix: use raw string literals. The original plain literals contained
    # '\s', an invalid escape sequence (DeprecationWarning on Python 3.6+,
    # SyntaxWarning on newer versions). The runtime regex value is
    # unchanged, since Python leaves unrecognized escapes as-is.
    # Matches '#include' lines and resource statements (ICON, BITMAP, ...),
    # capturing the quoted/bracketed file name.
    res_re = r'^(?:\s*#\s*(?:include)|' r'.*?\s+(?:ICON|BITMAP|CURSOR|HTML|FONT|MESSAGETABLE|TYPELIB|REGISTRY|D3DFX)' r'\s*.*?)' r'\s*(<|"| )([^>"\s]+)(?:[>"\s])*$'
    resScanner = SCons.Scanner.ClassicCPP("ResourceScanner", "$RCSUFFIXES", "CPPPATH", res_re, recursive=no_tlb)
    return resScanner
|
def define_contributor(self, request):
    """Define contributor by storing the resolved user's pk in request.data."""
    contributor = self.resolve_user(request.user)
    request.data['contributor'] = contributor.pk
|
def make_instance(cls, id, client, parent_id=None, json=None):
    """Makes an instance of the class this is called on and returns it.

    The intended usage is:
        instance = Linode.make_instance(123, client, json=response)

    :param cls: The class this was called on.
    :param id: The id of the instance to create
    :param client: The client to use for this instance
    :param parent_id: The parent id for derived classes
    :param json: The JSON to populate the instance with

    :returns: A new instance of this type, populated with json
    """
    # Delegates to the generic factory; `cls` selects the concrete type built.
    return Base.make(id, client, cls, parent_id=parent_id, json=json)
|
def _get_value(context, key):
    """Retrieve a key's value from a context item.

    Returns _NOT_FOUND if the key does not exist.

    The ContextStack.get() docstring documents this function's intended
    behavior.
    """
    if isinstance(context, dict):
        # Then we consider the argument a "hash" for the purposes of the spec.
        # We do a membership test to avoid using exceptions for flow control
        # (e.g. catching KeyError).
        if key in context:
            return context[key]
    elif type(context).__module__ != _BUILTIN_MODULE:
        # Then we consider the argument an "object" for the purposes of
        # the spec.
        # The elif test above lets us avoid treating instances of built-in
        # types like integers and strings as objects (cf. issue #81).
        # Instances of user-defined classes on the other hand, for example,
        # are considered objects by the test above.
        try:
            attr = getattr(context, key)
        except AttributeError:
            # TODO: distinguish the case of the attribute not existing from
            # an AttributeError being raised by the call to the attribute.
            # See the following issue for implementation ideas:
            # http://bugs.python.org/issue7559
            pass
        else:
            # TODO: consider using EAFP here instead.
            # http://docs.python.org/glossary.html#term-eafp
            if callable(attr):
                # Zero-argument callables are invoked and their result returned.
                return attr()
            return attr
    # Built-in-type instances and missing keys both fall through to here.
    return _NOT_FOUND
|
def get_static_lib_paths():
    """Return the required static libraries path.

    On Linux the library list is wrapped in GNU ld's
    --start-group/--end-group flags.
    """
    on_linux = sys.platform.startswith('linux')
    paths = []
    if on_linux:
        paths.append('-Wl,--start-group')
    paths.extend(get_raw_static_lib_path())
    if on_linux:
        paths.append('-Wl,--end-group')
    return paths
|
def _handle_nice_params(optim_params: dict) -> None:
    """Convert the user friendly params into something the optimizer can
    understand.

    Mutates ``optim_params`` in place: normalizes callbacks, resolves the
    negative-gradient method name (or callable) into a callable, and
    clamps ``n_jobs`` to the available CPU count.
    """
    # Handle callbacks
    optim_params["callbacks"] = _check_callbacks(optim_params.get("callbacks"))
    optim_params["use_callbacks"] = optim_params["callbacks"] is not None
    # Handle negative gradient method
    negative_gradient_method = optim_params.pop("negative_gradient_method")
    if callable(negative_gradient_method):
        # A user-supplied callable is used as-is.
        negative_gradient_method = negative_gradient_method
    elif negative_gradient_method in {"bh", "BH", "barnes-hut"}:
        negative_gradient_method = kl_divergence_bh
    elif negative_gradient_method in {"fft", "FFT", "interpolation"}:
        negative_gradient_method = kl_divergence_fft
    else:
        raise ValueError("Unrecognized gradient method. Please choose one of " "the supported methods or provide a valid callback.")
    # `gradient_descent` uses the more informative name `objective_function`
    optim_params["objective_function"] = negative_gradient_method
    # Handle number of jobs
    n_jobs = optim_params.get("n_jobs", 1)
    if n_jobs < 0:
        n_cores = multiprocessing.cpu_count()
        # Add negative number of n_jobs to the number of cores, but increment by
        # one because -1 indicates using all cores, -2 all except one, and so on
        n_jobs = n_cores + n_jobs + 1
        # If the number of jobs, after this correction is still <= 0, then the user
        # probably thought they had more cores, so we'll default to 1
        if n_jobs <= 0:
            # NB: the warning deliberately reports the user's original value,
            # read back from optim_params before the clamped value is stored.
            log.warning("`n_jobs` receieved value %d but only %d cores are available. " "Defaulting to single job." % (optim_params["n_jobs"], n_cores))
            n_jobs = 1
    optim_params["n_jobs"] = n_jobs
|
def exonic_transcript_effect(variant, exon, exon_number, transcript):
    """Effect of this variant on a Transcript, assuming we already know
    that this variant overlaps some exon of the transcript.

    Parameters
    ----------
    variant : Variant
    exon : pyensembl.Exon
        Exon which this variant overlaps
    exon_number : int
        Index (starting from 1) of the given exon in the transcript's
        sequence of exons.
    transcript : pyensembl.Transcript
    """
    genome_ref = variant.trimmed_ref
    genome_alt = variant.trimmed_alt
    variant_start = variant.trimmed_base1_start
    variant_end = variant.trimmed_base1_end
    # clip mutation to only affect the current exon
    if variant_start < exon.start:
        # if mutation starts before current exon then only look
        # at nucleotides which overlap the exon
        logger.info('Mutation in variant %s starts before exon %s', variant, exon)
        assert len(genome_ref) > 0, "Unexpected insertion into intron"
        n_skip_start = exon.start - variant_start
        genome_ref = genome_ref[n_skip_start:]
        genome_alt = genome_alt[n_skip_start:]
        genome_start = exon.start
    else:
        genome_start = variant_start
    if variant_end > exon.end:
        # if mutation goes past exon end then only look at nucleotides
        # which overlap the exon
        logger.info('Mutation in variant %s ends after exon %s', variant, exon)
        n_skip_end = variant_end - exon.end
        genome_ref = genome_ref[:-n_skip_end]
        # keep alt the same length as the clipped ref
        genome_alt = genome_alt[:len(genome_ref)]
        genome_end = exon.end
    else:
        genome_end = variant_end
    transcript_offset = interval_offset_on_transcript(genome_start, genome_end, transcript)
    # cDNA runs 5'->3', so reverse-complement alleles for minus-strand transcripts
    if transcript.on_backward_strand:
        cdna_ref = reverse_complement(genome_ref)
        cdna_alt = reverse_complement(genome_alt)
    else:
        cdna_ref = genome_ref
        cdna_alt = genome_alt
    n_ref = len(cdna_ref)
    # sanity check: the reference allele must match the transcript sequence
    expected_ref = str(transcript.sequence[transcript_offset:transcript_offset + n_ref])
    if cdna_ref != expected_ref:
        raise ValueError(("Found ref nucleotides '%s' in sequence" " of %s at offset %d (chromosome positions %d:%d)" " but variant %s has '%s'") % (expected_ref, transcript, transcript_offset, genome_start, genome_end, variant, cdna_ref))
    utr5_length = min(transcript.start_codon_spliced_offsets)
    # does the variant start inside the 5' UTR?
    if utr5_length > transcript_offset:
        # does the variant end after the 5' UTR, within the coding region?
        if utr5_length < transcript_offset + n_ref:
            # TODO: we *might* lose the Kozak sequence or the start codon
            # but without looking at the modified sequence how can we tell
            # for sure that this is a start-loss variant?
            return StartLoss(variant, transcript)
        else:
            # if variant contained within 5' UTR
            return FivePrimeUTR(variant, transcript)
    utr3_offset = max(transcript.stop_codon_spliced_offsets) + 1
    if transcript_offset >= utr3_offset:
        return ThreePrimeUTR(variant, transcript)
    exon_start_offset = interval_offset_on_transcript(exon.start, exon.end, transcript)
    exon_end_offset = exon_start_offset + len(exon) - 1
    # Further below we're going to try to predict exonic splice site
    # modifications, which will take this effect_annotation as their
    # alternative hypothesis for what happens if splicing doesn't change.
    # If the mutation doesn't affect an exonic splice site, then
    # we'll just return this effect.
    coding_effect_annotation = predict_variant_coding_effect_on_transcript(variant=variant, transcript=transcript, trimmed_cdna_ref=cdna_ref, trimmed_cdna_alt=cdna_alt, transcript_offset=transcript_offset)
    if changes_exonic_splice_site(transcript=transcript, transcript_ref=cdna_ref, transcript_alt=cdna_alt, transcript_offset=transcript_offset, exon_start_offset=exon_start_offset, exon_end_offset=exon_end_offset, exon_number=exon_number):
        return ExonicSpliceSite(variant=variant, transcript=transcript, exon=exon, alternate_effect=coding_effect_annotation)
    return coding_effect_annotation
|
def get_mesh(oqparam):
    """Extract the mesh of points to compute from the sites,
    the sites_csv, or the region.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :returns:
        a mesh of points (implicitly None when no site specification
        matches any branch below)
    """
    # Module-level caches shared across calls; exposure/gmfs/hazard data
    # are read once and stored here.
    global pmap, exposure, gmfs, eids
    if 'exposure' in oqparam.inputs and exposure is None:
        # read it only once
        exposure = get_exposure(oqparam)
    if oqparam.sites:
        # sites given directly in the job configuration
        return geo.Mesh.from_coords(oqparam.sites)
    elif 'sites' in oqparam.inputs:
        # sites given as a CSV file, with or without a lon/lat header
        fname = oqparam.inputs['sites']
        header = get_csv_header(fname)
        if 'lon' in header:
            data = []
            for i, row in enumerate(csv.DictReader(open(fname, encoding='utf-8-sig'))):
                # site_id rows, when present, must be numbered 0, 1, 2, ...
                if header[0] == 'site_id' and row['site_id'] != str(i):
                    raise InvalidFile('%s: expected site_id=%d, got %s' % (fname, i, row['site_id']))
                data.append(' '.join([row['lon'], row['lat']]))
        elif 'gmfs' in oqparam.inputs:
            raise InvalidFile('Missing header in %(sites)s' % oqparam.inputs)
        else:
            # headerless file: treat each line as comma-separated coordinates
            data = [line.replace(',', ' ') for line in open(fname, encoding='utf-8-sig')]
        coords = valid.coordinates(','.join(data))
        start, stop = oqparam.sites_slice
        # NB: when explicit site_ids are given, their order is preserved;
        # otherwise coordinates are sorted
        c = (coords[start:stop] if header[0] == 'site_id' else sorted(coords[start:stop]))
        return geo.Mesh.from_coords(c)
    elif 'hazard_curves' in oqparam.inputs:
        fname = oqparam.inputs['hazard_curves']
        if isinstance(fname, list):
            # for csv
            mesh, pmap = get_pmap_from_csv(oqparam, fname)
        elif fname.endswith('.xml'):
            mesh, pmap = get_pmap_from_nrml(oqparam, fname)
        else:
            raise NotImplementedError('Reading from %s' % fname)
        return mesh
    elif 'gmfs' in oqparam.inputs:
        # sets oqparam.sites as a side effect
        eids, gmfs = _get_gmfs(oqparam)
        return geo.Mesh.from_coords(oqparam.sites)
    elif oqparam.region_grid_spacing:
        # build a regular grid over the region / site model / exposure hull
        if oqparam.region:
            poly = geo.Polygon.from_wkt(oqparam.region)
        elif 'site_model' in oqparam.inputs:
            sm = get_site_model(oqparam)
            poly = geo.Mesh(sm['lon'], sm['lat']).get_convex_hull()
        elif exposure:
            poly = exposure.mesh.get_convex_hull()
        else:
            raise InvalidFile('There is a grid spacing but not a region, ' 'nor a site model, nor an exposure in %s' % oqparam.inputs['job_ini'])
        try:
            mesh = poly.dilate(oqparam.region_grid_spacing).discretize(oqparam.region_grid_spacing)
            return geo.Mesh.from_coords(zip(mesh.lons, mesh.lats))
        except Exception:
            raise ValueError('Could not discretize region with grid spacing ' '%(region_grid_spacing)s' % vars(oqparam))
    elif 'exposure' in oqparam.inputs:
        # fall back to the exposure's own mesh
        return exposure.mesh
|
def update_release_environment(self, environment_update_data, project, release_id, environment_id):
    """UpdateReleaseEnvironment.

    [Preview API] Update the status of a release environment.

    :param :class:`<ReleaseEnvironmentUpdateMetadata> <azure.devops.v5_0.release.models.ReleaseEnvironmentUpdateMetadata>` environment_update_data: Environment update meta data.
    :param str project: Project ID or project name
    :param int release_id: Id of the release.
    :param int environment_id: Id of release environment.
    :rtype: :class:`<ReleaseEnvironment> <azure.devops.v5_0.release.models.ReleaseEnvironment>`
    """
    # (route key, serializer parameter name, value, wire type)
    route_specs = (
        ('project', 'project', project, 'str'),
        ('releaseId', 'release_id', release_id, 'int'),
        ('environmentId', 'environment_id', environment_id, 'int'),
    )
    route_values = {}
    for route_key, param_name, value, wire_type in route_specs:
        if value is not None:
            route_values[route_key] = self._serialize.url(param_name, value, wire_type)
    content = self._serialize.body(environment_update_data, 'ReleaseEnvironmentUpdateMetadata')
    response = self._send(http_method='PATCH', location_id='a7e426b1-03dc-48af-9dfe-c98bac612dcb', version='5.0-preview.6', route_values=route_values, content=content)
    return self._deserialize('ReleaseEnvironment', response)
|
def add_github_roles(app, repo):
    """Add a ``gh`` role to your sphinx documents.

    It can generate GitHub links easily::

        :gh:`issue #57` will generate the issue link
        :gh:`PR #85` will generate the pull request link

    Use this function in ``conf.py`` to enable this feature::

        def setup(app):
            sphinx_typlog_theme.add_github_roles(app, 'lepture/authlib')

    :param app: sphinx app
    :param repo: GitHub repo, e.g. "lepture/authlib"
    """
    from docutils.nodes import reference
    from docutils.parsers.rst.roles import set_classes

    base_url = 'https://github.com/{}'.format(repo)

    def github_role(name, rawtext, text, lineno, inliner, options=None, content=None):
        # Default link: treat the role text as a path under the repo.
        url = base_url + '/' + text
        if '#' in text:
            kind, number = text.split('#', 1)
            kind = kind.lower()
            if kind in ('issue', 'issues'):
                url = base_url + '/issues/{}'.format(number)
            elif kind in ('pr', 'pull', 'pull request'):
                url = base_url + '/pull/{}'.format(number)
            elif kind in ('commit', 'commits'):
                url = base_url + '/commit/{}'.format(number)
        # An empty/missing options dict falls back to the "gh" CSS class.
        options = options or {'classes': ['gh']}
        set_classes(options)
        node = reference(rawtext, text, refuri=url, **options)
        return [node], []

    app.add_role('gh', github_role)
|
def usearch_cluster_seqs_ref(fasta_filepath, output_filepath=None, percent_id=0.97, sizein=True, sizeout=True, w=64, slots=16769023, maxrejects=64, log_name="usearch_cluster_seqs.log", usersort=True, HALT_EXEC=False, save_intermediate_files=False, remove_usearch_logs=False, suppress_new_clusters=False, refseqs_fp=None, output_dir=None, working_dir=None, rev=False):
    """Cluster seqs against a reference at percent_id, output consensus fasta.

    Also appends de novo clustered seqs if suppress_new_clusters is False.
    Forced to handle reference + de novo in hackish fashion as usearch does
    not work as listed in the helpstrings.  Any failures are clustered
    de novo, and given unique cluster IDs.

    fasta_filepath = input fasta file, generally a dereplicated fasta
    output_filepath = output reference clustered uc filepath
    percent_id = minimum identity percent
    sizein = not defined in usearch helpstring
    sizeout = not defined in usearch helpstring
    w = word length for U-sorting
    slots = size of compressed index table; should be prime, e.g. 4000003,
     and should also specify --w (typical is --w 16 or --w 32)
    maxrejects = max rejected targets, 0=ignore, default 32
    log_name = string specifying output log name
    usersort = enable if input fasta not sorted by length purposefully, lest
     usearch will raise an error; post chimera-checked seqs are sorted by
     abundance, so this should be set to True
    HALT_EXEC = used for debugging the app controller
    save_intermediate_files = preserve all intermediate files created
    remove_usearch_logs = suppress creation of usearch log files
    suppress_new_clusters = disables de novo OTUs when ref based OTU picking
     is enabled
    refseqs_fp = filepath for ref based OTU picking
    output_dir = output directory for result/intermediate fasta files
    working_dir = directory for the uc/log intermediates
    rev = search plus and minus strands of sequences

    Returns a (app_result, output_filepath) tuple.
    """
    if not output_filepath:
        # No destination given: fall back to a fresh temp .uc file.
        _, output_filepath = mkstemp(prefix='usearch_cluster_ref_based', suffix='.uc')
    log_filepath = join(working_dir, log_name)
    uc_filepath = join(working_dir, "clustered_seqs_post_chimera.uc")
    params = {'--sizein': sizein, '--sizeout': sizeout, '--id': percent_id,
              '--w': w, '--slots': slots, '--maxrejects': maxrejects}
    app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
    if usersort:
        # Input is abundance-sorted, not length-sorted; usearch errors otherwise.
        app.Parameters['--usersort'].on()
    if rev:
        app.Parameters['--rev'].on()
    data = {'--query': fasta_filepath, '--uc': uc_filepath, '--db': refseqs_fp}
    if not remove_usearch_logs:
        data['--log'] = log_filepath
    app_result = app(data)
    files_to_remove = []
    # Need to create fasta file of all hits (with reference IDs),
    # recluster failures if new clusters allowed, and create complete fasta
    # file, with unique fasta label IDs.
    if suppress_new_clusters:
        # Reference-only mode: keep hits ("H" records) and drop failures.
        output_fna_filepath = join(output_dir, 'ref_clustered_seqs.fasta')
        output_filepath, labels_hits = get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="H", output_dir=output_dir, output_fna_filepath=output_fna_filepath)
        files_to_remove.append(uc_filepath)
    else:
        # Get fasta of successful ref based clusters
        output_fna_clustered = join(output_dir, 'ref_clustered_seqs.fasta')
        output_filepath_ref_clusters, labels_hits = get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="H", output_dir=output_dir, output_fna_filepath=output_fna_clustered)
        # get failures ("N" records) and recluster them de novo
        output_fna_failures = join(output_dir, 'ref_clustered_seqs_failures.fasta')
        output_filepath_failures, labels_hits = get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="N", output_dir=output_dir, output_fna_filepath=output_fna_failures)
        # de novo cluster the failures
        app_result, output_filepath_clustered_failures = usearch_cluster_seqs(output_fna_failures, output_filepath=join(output_dir, 'clustered_seqs_reference_failures.fasta'), percent_id=percent_id, sizein=sizein, sizeout=sizeout, w=w, slots=slots, maxrejects=maxrejects, save_intermediate_files=save_intermediate_files, remove_usearch_logs=remove_usearch_logs, working_dir=working_dir)
        # Final output is reference hits followed by de novo clusters.
        output_filepath = concatenate_fastas(output_fna_clustered, output_fna_failures, output_concat_filepath=join(output_dir, 'concatenated_reference_denovo_clusters.fasta'))
        files_to_remove.append(output_fna_clustered)
        files_to_remove.append(output_fna_failures)
        files_to_remove.append(output_filepath_clustered_failures)
    if not save_intermediate_files:
        remove_files(files_to_remove)
    return app_result, output_filepath
|
def create_or_update_policy(self, name, policy, pretty_print=True):
    """Add a new policy or update an existing one.

    Once a policy is updated, it takes effect immediately for all
    associated users.

    Supported methods:
        PUT: /sys/policy/{name}. Produces: 204 (empty body)

    :param name: Specifies the name of the policy to create.
    :type name: str | unicode
    :param policy: Specifies the policy document.
    :type policy: str | unicode | dict
    :param pretty_print: If True, and provided a dict for the policy
        argument, send the policy JSON to Vault with "pretty" formatting.
    :type pretty_print: bool
    :return: The response of the request.
    :rtype: requests.Response
    """
    if isinstance(policy, dict):
        # Serialize dict policies; pretty mode adds indentation and sorted keys.
        policy = (json.dumps(policy, indent=4, sort_keys=True)
                  if pretty_print else json.dumps(policy))
    api_path = '/v1/sys/policy/{name}'.format(name=name)
    return self._adapter.put(url=api_path, json={'policy': policy})
|
def compare(self, statement_a, statement_b):
    """Return the calculated similarity of two statements based on the
    Jaccard index.

    Both statement texts are lowercased and tokenized, stop words are
    discarded, and the Jaccard index |A ∩ B| / |A ∪ B| is computed over
    the resulting lemma sets.

    :param statement_a: first statement; must expose a ``text`` attribute
    :param statement_b: second statement; must expose a ``text`` attribute
    :return: similarity ratio as a float in [0.0, 1.0]
    """
    # Make both strings lowercase so lemma comparison is case-insensitive
    document_a = self.nlp(statement_a.text.lower())
    document_b = self.nlp(statement_b.text.lower())
    statement_a_lemmas = {token.lemma_ for token in document_a if not token.is_stop}
    statement_b_lemmas = {token.lemma_ for token in document_b if not token.is_stop}
    # Calculate Jaccard similarity
    numerator = len(statement_a_lemmas.intersection(statement_b_lemmas))
    denominator = float(len(statement_a_lemmas.union(statement_b_lemmas)))
    # Bug fix: if both statements contain only stop words (or are empty),
    # both lemma sets are empty and the old code raised ZeroDivisionError.
    # Two contentless statements are treated as having zero similarity.
    if denominator == 0.0:
        return 0.0
    return numerator / denominator
|
def path():
    """Return the configuration file path.

    The config directory comes from the ``DID_DIR`` environment variable,
    falling back to the module-level ``CONFIG`` default.  The file name
    defaults to ``config`` but can be overridden on the command line with
    ``--config <file>`` (abbreviations like ``--conf`` are accepted), which
    is detected even before options are parsed.
    """
    # Detect config directory
    try:
        directory = os.environ["DID_DIR"]
    except KeyError:
        directory = CONFIG
    # Detect config file name (scanned from raw argv, pre option parsing)
    filename = "config"
    matched = re.search(r"--confi?g?[ =](\S+)", " ".join(sys.argv))
    if matched:
        filename = matched.group(1)
    return directory.rstrip("/") + "/" + filename
|
def export_mesh(mesh, file_obj, file_type=None, **kwargs):
    """Export a Trimesh object to a file-like object, or to a filename.

    Parameters
    ------------
    mesh : Trimesh, or sequence of Trimesh
      Mesh(es) to export
    file_obj : str or file-like
      Where the mesh should be exported to
    file_type : str or None
      Represents file type (eg: 'stl'); inferred from the
      file name extension when None
    **kwargs
      Passed through to the format-specific exporter

    Returns
    ----------
    exported : bytes or str
      Result of exporter

    Raises
    ----------
    ValueError
      If no exporter is available for ``file_type``
    """
    # if we opened a file object in this function
    # we will want to close it when we're done (even on error)
    was_opened = False
    if util.is_string(file_obj):
        if file_type is None:
            # infer the file type from the file name extension
            file_type = (str(file_obj).split('.')[-1]).lower()
        if file_type in _mesh_exporters:
            was_opened = True
            file_obj = open(file_obj, 'wb')
    file_type = str(file_type).lower()
    if file_type not in _mesh_exporters:
        # bug fix: the format argument was previously passed as a second
        # positional argument to ValueError instead of being %-formatted
        # into the message, producing an unformatted error string
        raise ValueError('%s exporter not available!' % file_type)
    try:
        if isinstance(mesh, (list, tuple, set, np.ndarray)):
            faces = sum(len(m.faces) for m in mesh)
            log.debug('Exporting %d meshes with a total of %d faces as %s',
                      len(mesh), faces, file_type.upper())
        else:
            log.debug('Exporting %d faces as %s',
                      len(mesh.faces), file_type.upper())
        export = _mesh_exporters[file_type](mesh, **kwargs)
        if hasattr(file_obj, 'write'):
            result = util.write_encoded(file_obj, export)
        else:
            result = export
    finally:
        # close the handle we opened, even if the exporter raised
        if was_opened:
            file_obj.close()
    return result
|
def task_delete(self, **kw):
    """Mark a task as deleted.

    A completion date may optionally be given through the 'end' keyword
    argument.  Raises ValueError if the task is already deleted.
    """
    def ensure_not_deleted(task):
        # Deleting an already-deleted task is a caller error.
        if task['status'] == Status.DELETED:
            raise ValueError("Task is already deleted.")
    return self._task_change_status(Status.DELETED, ensure_not_deleted, **kw)
|
def set_tag(self, ip_dest, next_hop, **kwargs):
    """Set the tag value for the specified route.

    Args:
        ip_dest (string): The ip address of the destination in the
            form of A.B.C.D/E
        next_hop (string): The next hop interface or ip address
        **kwargs['next_hop_ip'] (string): The next hop address on
            destination interface
        **kwargs['distance'] (string): Administrative distance for this
            route
        **kwargs['tag'] (string): Route tag
        **kwargs['route_name'] (string): Route name

    Returns:
        True if the operation succeeds, otherwise False.

    Notes:
        Any existing route_name value must be included in the call to
        set_tag, otherwise the tag will be reset by the call to EOS.
    """
    # Re-issuing the route with the new tag replaces the existing entry.
    return self._set_route(ip_dest, next_hop, **kwargs)
|
def _check_FITS_extvers(img, extname, extvers):
    """Report whether all requested extension versions exist in a FITS file.

    Returns True if every (non-None) extension version in ``extvers`` of
    the extension type given by ``extname`` is present in the ``img`` FITS
    file; False if any of them cannot be found.
    """
    # None entries fall back to EXTVER 1 for named extensions, 0 otherwise.
    fallback = 1 if isinstance(extname, str) else 0
    if not isinstance(extvers, list):
        extvers = [extvers]
    wanted = {fallback if ver is None else ver for ver in extvers}
    available = set(get_extver_list(img, extname))
    return wanted.issubset(available)
|
def get_log_rhos(target_action_log_probs, behaviour_action_log_probs):
    """Compute log importance-sampling ratios (log rhos) for V-trace.

    Stacks the selected per-(sub)action log-probs of the target and
    behaviour policies and sums their differences over the action
    dimension (axis 0).
    """
    target = tf.stack(target_action_log_probs)
    behaviour = tf.stack(behaviour_action_log_probs)
    return tf.reduce_sum(target - behaviour, axis=0)
|
def eradicate_pgroup(self, pgroup, **kwargs):
    """Eradicate a destroyed pgroup.

    :param pgroup: Name of pgroup to be eradicated.
    :type pgroup: str
    :param \\*\\*kwargs: See the REST API Guide on your array for the
                       documentation on the request:
                       **DELETE pgroup/:pgroup**
    :type \\*\\*kwargs: optional
    :returns: A dictionary mapping "name" to pgroup.
    :rtype: ResponseDict

    .. note::
        Requires use of REST API 1.2 or later.
    """
    # Explicit kwargs take precedence over the eradicate default.
    params = dict({"eradicate": True}, **kwargs)
    return self._request("DELETE", "pgroup/{0}".format(pgroup), params)
|
def find(self, item, description='', event_type=''):
    """Find a record in the activity log by regexp.

    ``item`` may be written as ``"<type>: <description>"``: when the
    prefix is one of ``self.TYPES`` it is used as the event type filter
    and the remainder as the description pattern.  Otherwise ``item``
    itself is used as the description pattern (unless an explicit
    ``description`` was passed).

    :param item: search pattern, optionally prefixed with an event type
    :param description: explicit regexp matched against record descriptions
    :param event_type: explicit event type to filter records by
    :return: list of ``time`` values of matching records
    :raises exceptions.NotFoundError: if no record matches
    """
    if ': ' in item:
        prefix, rest = item.split(': ', 1)
        if prefix in self.TYPES:
            # Bug fix: the old code re-split item without maxsplit, which
            # truncated descriptions that themselves contain ': '.
            description = rest
            event_type = prefix
        else:
            description = item
    elif not description:
        description = item
    found = [
        record['time'] for record in self.log
        if re.search(description, record['description'])
        and (not event_type or record['eventTypeText'] == event_type)
    ]
    if found:
        return found
    raise exceptions.NotFoundError(
        "Item '{}' is not found with (description='{}', event_type='{}')".format(
            item, description, event_type))
|
def exclude_file(self, file):
    """Return True if *file* should be excluded based on name patterns.

    Checks the name against every pre-compiled exclude pattern.
    """
    return any(pattern.match(file) for pattern in self.compiled_exclude_files)
|
def parse(cls: Type[MessageT], uid: int, data: bytes,
          permanent_flags: Iterable[Flag], internal_date: datetime,
          expunged: bool = False, **kwargs: Any) -> MessageT:
    """Parse raw MIME-encoded message bytes into a
    :class:`BaseLoadedMessage` object.

    Args:
        uid: The UID of the message.
        data: The raw contents of the message.
        permanent_flags: Permanent flags for the message.
        internal_date: The internal date of the message.
        expunged: True if this message has been expunged from the mailbox.
        **kwargs: Passed through to the class constructor.
    """
    parsed_content = MessageContent.parse(data)
    return cls(uid, permanent_flags, internal_date, expunged,
               parsed_content, **kwargs)
|
def appt_exists(self, complex: str, house: str, appt: str) -> bool:
    """Shortcut to check whether the appt exists in our database."""
    try:
        self.check_appt(complex, house, appt)
    except exceptions.RumetrApptNotFound:
        return False
    else:
        # check_appt completed without raising, so the appt is known.
        return True
|
def add_otp_style(self, zip_odp, style_file):
    """Merge the styles from *style_file* into the slide content zip.

    Copies every picture referenced by the style sheet, then writes the
    style XML (with our overrides applied) into the presentation.
    """
    style = zipwrap.Zippier(style_file)
    for picture in style.ls("Pictures"):
        zip_odp.write(picture, style.cat(picture, True))
    # Apply our overrides to the style sheet before writing it out.
    styles_xml = self.override_styles(style.cat("styles.xml", False))
    zip_odp.write("styles.xml", styles_xml)
|
def ph_basename(self, ph_type):
    """Return the base name for a placeholder of *ph_type* in this shape
    collection.

    A notes slide uses a different name for the body placeholder and has
    some unique placeholder types, so this mapping overrides the default
    provided by the base class.
    """
    basenames = {
        PP_PLACEHOLDER.BODY:         'Notes Placeholder',
        PP_PLACEHOLDER.DATE:         'Date Placeholder',
        PP_PLACEHOLDER.FOOTER:       'Footer Placeholder',
        PP_PLACEHOLDER.HEADER:       'Header Placeholder',
        PP_PLACEHOLDER.SLIDE_IMAGE:  'Slide Image Placeholder',
        PP_PLACEHOLDER.SLIDE_NUMBER: 'Slide Number Placeholder',
    }
    return basenames[ph_type]
|
def _adorn_subplots(self):
    """Common post-processing of the axes, unrelated to the plotted data.

    Applies shared-axis tick handling, explicit tick/limit overrides,
    grid visibility and the configured title(s).

    Raises
    ------
    ValueError
        If ``title`` is a list whose length does not match the number of
        columns, or a list ``title`` is used without ``subplots=True``.
    """
    if len(self.axes) > 0:
        all_axes = self._get_subplots()
        nrows, ncols = self._get_axes_layout()
        # Hide redundant tick labels on axes sharing x/y with a neighbour.
        _handle_shared_axes(axarr=all_axes, nplots=len(all_axes),
                            naxes=nrows * ncols, nrows=nrows, ncols=ncols,
                            sharex=self.sharex, sharey=self.sharey)
    for ax in self.axes:
        # Explicit tick/limit settings take precedence over defaults.
        if self.yticks is not None:
            ax.set_yticks(self.yticks)
        if self.xticks is not None:
            ax.set_xticks(self.xticks)
        if self.ylim is not None:
            ax.set_ylim(self.ylim)
        if self.xlim is not None:
            ax.set_xlim(self.xlim)
        ax.grid(self.grid)
    if self.title:
        if self.subplots:
            if is_list_like(self.title):
                # One title per column: the lengths must agree.
                if len(self.title) != self.nseries:
                    msg = ('The length of `title` must equal the number '
                           'of columns if using `title` of type `list` '
                           'and `subplots=True`.\n'
                           'length of title = {}\n'
                           'number of columns = {}').format(
                        len(self.title), self.nseries)
                    raise ValueError(msg)
                for (ax, title) in zip(self.axes, self.title):
                    ax.set_title(title)
            else:
                # One shared title across the whole figure of subplots.
                self.fig.suptitle(self.title)
        else:
            if is_list_like(self.title):
                msg = ('Using `title` of type `list` is not supported '
                       'unless `subplots=True` is passed')
                raise ValueError(msg)
            self.axes[0].set_title(self.title)
|
def find_dotenv(filename='.env', raise_error_if_not_found=False, usecwd=False):
    """Search in increasingly higher folders for the given file.

    :param filename: name of the file to look for
    :param raise_error_if_not_found: raise IOError instead of returning ''
    :param usecwd: start from the current working directory instead of the
        calling module's directory
    :return: path to the file if found, or an empty string otherwise
    """
    if usecwd or '__file__' not in globals():
        # No module file to anchor on (e.g. REPL or IPython notebook).
        start = os.getcwd()
    else:
        # Anchor on the directory of the calling module's .py file.
        caller_file = sys._getframe().f_back.f_code.co_filename
        start = os.path.dirname(os.path.abspath(caller_file))
    for dirname in _walk_to_root(start):
        candidate = os.path.join(dirname, filename)
        if os.path.exists(candidate):
            return candidate
    if raise_error_if_not_found:
        raise IOError('File not found')
    return ''
|
def miller_index_from_sites(lattice, coords, coords_are_cartesian=True,
                            round_dp=4, verbose=True):
    """Get the Miller index of a plane from a list of site coordinates.

    A minimum of 3 sets of coordinates are required. If more than 3 sets
    of coordinates are given, the best plane that minimises the distance
    to all points will be calculated.

    Args:
        lattice (list or Lattice): A 3x3 lattice matrix or `Lattice`
            object (for example obtained from Structure.lattice).
        coords (iterable): A list or numpy array of coordinates, either
            cartesian or fractional.
        coords_are_cartesian (bool, optional): Whether the coordinates
            are in cartesian space. Set to False for fractional
            coordinates.
        round_dp (int, optional): Number of decimal places to round the
            miller index to.
        verbose (bool, optional): Whether to print warnings.

    Returns:
        (tuple): The Miller index.
    """
    # Accept a raw 3x3 matrix by promoting it to a Lattice object.
    lat = lattice if isinstance(lattice, Lattice) else Lattice(lattice)
    return lat.get_miller_index_from_coords(
        coords, coords_are_cartesian=coords_are_cartesian,
        round_dp=round_dp, verbose=verbose)
|
def urlretrieve(self, url, filename, data=None):
    """Download *url* to *filename*.

    Similar to urllib.urlretrieve or urllib.request.urlretrieve, except
    that *filename* is required.

    :param url: URL to download.
    :param filename: Filename to save the content to.
    :param data: Valid URL-encoded data.
    :return: Tuple containing path and headers.
    """
    logger.info("saving: '%s' to '%s'", url, filename)
    if not _is_py3:
        # Python 2 path: the legacy opener provides retrieve() directly.
        return self.opener2.retrieve(url, filename, data=data)
    return _urlretrieve_with_opener(self.opener, url, filename, data=data)
|
def plot(self, numPoints=100):
    """Plot this shape's surface on a new 3D figure.

    NOTE(review): the original docstring said "cylinders", but the code
    below generates and plots a sphere of radius ``self.radius``.

    :param numPoints: angular resolution of the surface mesh in each
        direction (phi and theta)
    :return: tuple ``(fig, ax)`` with the created figure and 3D axes
    """
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    # generate sphere: spherical-coordinate grid mapped to cartesian x/y/z
    phi, theta = np.meshgrid(np.linspace(0, pi, numPoints),
                             np.linspace(0, 2 * pi, numPoints))
    x = self.radius * np.sin(phi) * np.cos(theta)
    y = self.radius * np.sin(phi) * np.sin(theta)
    z = self.radius * np.cos(phi)
    # plot a translucent surface with reduced mesh density
    ax.plot_surface(x, y, z, alpha=0.2, rstride=20, cstride=10)
    ax.set_xlabel("X")
    ax.set_ylabel("Y")
    ax.set_zlabel("Z")
    plt.title("{}".format(self))
    return fig, ax
|
def startSubscription(self, reqId, subscriber, contract=None):
    """Register a live subscription under *reqId*.

    Records both the optional contract and the subscriber so incoming
    messages for this request id can be routed back.
    """
    self._reqId2Contract[reqId] = contract
    self.reqId2Subscriber[reqId] = subscriber
|
def main():
    """Top-level entry point; returns a shell status code.

    Dispatches to the matching peep subcommand when one is given on the
    command line, otherwise falls through to pip's own main().
    """
    commands = {'hash': peep_hash, 'install': peep_install, 'port': peep_port}
    try:
        if len(argv) >= 2 and argv[1] in commands:
            return commands[argv[1]](argv[2:])
        # No recognized subcommand: let top-level pip handle everything else.
        return pip.main()
    except PipException as exc:
        return exc.error_code
|
def _get_unique_index(self, dropna=False):
    """Return an index containing only unique values.

    Parameters
    ----------
    dropna : bool
        If True, NaN values are dropped.

    Returns
    -------
    uniques : index
    """
    if self.is_unique and not dropna:
        # Fast path: already unique and NaNs are to be kept.
        return self
    values = self.values if self.is_unique else self.unique()
    if dropna:
        try:
            if self.hasnans:
                values = values[~isna(values)]
        except NotImplementedError:
            # Some index types cannot report NaNs; keep values as-is.
            pass
    return self._shallow_copy(values)
|
def create_room(self, vc_room, event):
    """Create a new Vidyo room for an event, given a VC room.

    In order to create the Vidyo room, the function will try to do so with
    all the available identities of the user based on the authenticators
    defined in Vidyo plugin's settings, in that order.  Likewise, if the
    chosen extension is already taken, the next candidate extension is
    tried until creation succeeds.

    :param vc_room: VCRoom -- The VC room from which to create the Vidyo
                    room
    :param event: Event -- The event the Vidyo room will be attached to
    """
    client = AdminClient(self.settings)
    owner = retrieve_principal(vc_room.data['owner'])
    # Candidate Vidyo logins for the owner, tried in authenticator order.
    login_gen = iter_user_identities(owner)
    login = next(login_gen, None)
    if login is None:
        raise VCRoomError(_("No valid Vidyo account found for this user"), field='owner_user')
    # Candidate phone extensions derived from the configured prefix + event id.
    extension_gen = iter_extensions(self.settings.get('indico_room_prefix'), event.id)
    extension = next(extension_gen)
    # Retry loop: each API failure either advances to the next candidate
    # login/extension or aborts with a user-facing error.
    while True:
        room_mode = {'isLocked': False, 'hasPIN': bool(vc_room.data['room_pin']), 'hasModeratorPIN': bool(vc_room.data['moderation_pin'])}
        if room_mode['hasPIN']:
            room_mode['roomPIN'] = vc_room.data['room_pin']
        if room_mode['hasModeratorPIN']:
            room_mode['moderatorPIN'] = vc_room.data['moderation_pin']
        room_obj = client.create_room_object(name=vc_room.name, RoomType='Public', ownerName=login, extension=extension, groupName=self.settings.get('room_group_name'), description=vc_room.data['description'], RoomMode=room_mode)
        # Re-apply the PINs on the created object as well; presumably the
        # SOAP object does not inherit them from the dict -- TODO confirm.
        if room_obj.RoomMode.hasPIN:
            room_obj.RoomMode.roomPIN = vc_room.data['room_pin']
        if room_obj.RoomMode.hasModeratorPIN:
            room_obj.RoomMode.moderatorPIN = vc_room.data['moderation_pin']
        try:
            client.add_room(room_obj)
        except APIException as err:
            # The Vidyo API signals the failure reason only via the
            # message text, so we have to dispatch on its prefix.
            err_msg = err.message
            if err_msg.startswith('Room exist for name'):
                raise VCRoomError(_("Room name already in use"), field='name')
            elif err_msg.startswith('Member not found for ownerName'):
                # Try the owner's next candidate identity, if any.
                login = next(login_gen, None)
                if login is None:
                    raise VCRoomError(_("No valid Vidyo account found for this user"), field='owner_user')
            elif err_msg.startswith('Room exist for extension'):
                # Extension collision: try the next candidate extension.
                extension = next(extension_gen)
            else:
                raise
        else:
            # get room back, in order to fetch Vidyo-set parameters
            created_room = client.find_room(extension)
            if not created_room:
                raise VCRoomNotFoundError(_("Could not find newly created room in Vidyo"))
            vc_room.data.update({'vidyo_id': unicode(created_room.roomID), 'url': created_room.RoomMode.roomURL, 'owner_identity': created_room.ownerName})
            # vc_room.data is a mutable JSON column; tell SQLAlchemy it changed.
            flag_modified(vc_room, 'data')
            vc_room.vidyo_extension = VidyoExtension(vc_room_id=vc_room.id, extension=int(created_room.extension), owned_by_user=owner)
            client.set_automute(created_room.roomID, vc_room.data['auto_mute'])
            break
|
def do_part(self, cmdargs, nick, target, msgtype, send, c):
    """Leave a channel.

    The primary channel and the control channel can never be left.
    """
    home = self.config['core']['channel']
    botnick = self.config['core']['nick']
    if not cmdargs:
        # No channel given: part the channel the command came from,
        # unless that is the primary channel.
        if target == home:
            send("%s must have a home." % botnick)
            return
        cmdargs = target
    if not cmdargs.startswith(('#', '+', '@')):
        cmdargs = '#' + cmdargs
    # don't leave the primary channel
    if cmdargs == home:
        send("%s must have a home." % botnick)
        return
    # don't leave the control channel
    if cmdargs == self.config['core']['ctrlchan']:
        send("%s must remain under control, or bad things will happen." % botnick)
        return
    self.send(cmdargs, nick, "Leaving at the request of %s" % nick, msgtype)
    c.part(cmdargs)
|
def Geldart_Ling(mp, rhog, D, mug):
    r'''Calculate the saltation velocity of the gas for pneumatic
    conveying, according to [1]_ as described in [2]_ and [3]_.

    When :math:`G_s/D \le 47000` the first equation is used, otherwise
    the second:

    .. math::
        V_{salt} = 1.5G_s^{0.465} D^{-0.01} \mu^{0.055} \rho_f^{-0.42}

        V_{salt} = 8.7G_s^{0.302} D^{0.153} \mu^{0.055} \rho_f^{-0.42}

        G_s = \frac{m_p}{A}

    Parameters
    ----------
    mp : float
        Solid mass flow rate, [kg/s]
    rhog : float
        Gas density, [kg/m^3]
    D : float
        Diameter of pipe, [m]
    mug : float
        Gas viscosity, [Pa*S]

    Returns
    -------
    V : float
        Saltation velocity of gas, [m/s]

    Notes
    -----
    The model is rearranged to be explicit in terms of saltation velocity
    internally.

    Examples
    --------
    >>> Geldart_Ling(1., 1.2, 0.1, 2E-5)
    7.467495862402707

    References
    ----------
    .. [1] Weber, M. 1981. Principles of hydraulic and pneumatic conveying
       in pipes. Bulk Solids Handling 1:57-63.
    .. [2] Rabinovich, Evgeny, and Haim Kalman. "Threshold Velocities of
       Particle-Fluid Flows in Horizontal Pipes and Ducts: Literature
       Review." Reviews in Chemical Engineering 27, no. 5-6 (January 1,
       2011). doi:10.1515/REVCE.2011.011.
    .. [3] Gomes, L. M., and A. L. Amarante Mesquita. "On the Prediction of
       Pickup and Saltation Velocities in Pneumatic Conveying." Brazilian
       Journal of Chemical Engineering 31, no. 1 (March 2014): 35-46.
       doi:10.1590/S0104-66322014000100005
    '''
    # Solids mass flux through the pipe cross section, kg/(s*m^2)
    Gs = mp/(pi/4*D**2)
    # Select the correlation coefficients by the Gs/D regime boundary.
    if Gs/D <= 47000:
        coeff, Gs_exp, D_exp = 1.5, 0.465, -0.01
    else:
        coeff, Gs_exp, D_exp = 8.7, 0.302, 0.153
    return coeff*Gs**Gs_exp*D**D_exp*mug**0.055*rhog**-0.42
|
def _eval ( self , e , n , extra_constraints = ( ) , exact = None ) :
"""Evaluate an expression , using the solver if necessary . Returns primitives .
: param e : the expression
: param n : the number of desired solutions
: param extra _ constraints : extra constraints to apply to the solver
: param exact : if False , returns approximate solutions
: return : a tuple of the solutions , in the form of Python primitives
: rtype : tuple"""
|
return self . _solver . eval ( e , n , extra_constraints = self . _adjust_constraint_list ( extra_constraints ) , exact = exact )
|
def push(h, x):
    """Push a new value onto heap ``h``.

    Appends ``x`` through the heap's own ``push`` and restores the heap
    invariant by sifting the new last element upward (via ``up``).
    """
    h.push(x)
    up(h, h.size() - 1)
|
def returner(load):
    '''Write the return data to every returner configured in multi_returner.

    Iterates the returner names listed under the multi_returner config key
    and invokes each one's ``<name>.returner`` function with ``load``.
    '''
    for returner_ in __opts__[CONFIG_KEY]:
        _mminion().returners['{0}.returner'.format(returner_)](load)
|
def trigger(self, transport):
    """Trigger the device through *transport*.

    Prefers the transport's native ``trigger()``; a transport without one
    (AttributeError) gets the IEC 60488 ``*TRG`` common command written
    instead.
    """
    logger.debug('IEC60488 trigger')
    with transport:
        try:
            transport.trigger()
        except AttributeError:
            # No native trigger support: fall back to the *TRG message.
            trigger_msg = self.create_message('*TRG')
            transport.write(trigger_msg)
|
def get_changes(self, getter=None, setter=None, resources=None,
                task_handle=taskhandle.NullTaskHandle()):
    """Get the changes this refactoring makes.

    If `getter` is not `None`, that will be the name of the getter,
    otherwise ``get_${field_name}`` will be used.  The same is true for
    `setter`: if it is None, ``set_${field_name}`` is used.

    `resources` can be a list of `rope.base.resource.File`\\s that the
    refactoring should be applied on; if `None` all python files in the
    project are searched.

    NOTE(review): the default ``taskhandle.NullTaskHandle()`` instance is
    created once at definition time and shared across calls -- presumably
    it is stateless, but confirm before relying on it.
    """
    if resources is None:
        resources = self.project.get_python_files()
    changes = ChangeSet('Encapsulate field <%s>' % self.name)
    job_set = task_handle.create_jobset('Collecting Changes', len(resources))
    if getter is None:
        getter = 'get_' + self.name
    if setter is None:
        setter = 'set_' + self.name
    renamer = GetterSetterRenameInModule(self.project, self.name,
                                         self.pyname, getter, setter)
    for file in resources:
        job_set.started_job(file.path)
        if file == self.resource:
            # The module that holds the field also gets the accessor
            # definitions themselves, not just call-site rewrites.
            result = self._change_holding_module(changes, renamer,
                                                 getter, setter)
            changes.add_change(ChangeContents(self.resource, result))
        else:
            result = renamer.get_changed_module(file)
            if result is not None:
                changes.add_change(ChangeContents(file, result))
        job_set.finished_job()
    return changes
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.