signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
async def send(self):
    """Push locally changed data for this item to Emby.

    |coro|

    Should be called soon (not necessarily immediately) after using any of
    the setters.

    See Also
    --------
    post : same thing
    update :
    refresh :

    Returns
    -------
    aiohttp.ClientResponse or None if nothing needed updating
    """
    # The whole dict has to be sent because Emby breaks on partial updates.
    endpoint = f'Items/{self.id}'
    resp = await self.connector.post(endpoint, data=self.object_dict, remote=False)
    if resp.status == 400:
        # Server rejected the payload — refresh our copy and retry once.
        await EmbyObject(self.object_dict, self.connector).update()
        resp = await self.connector.post(endpoint, data=self.object_dict, remote=False)
    return resp
def syllabify(word):
    """Syllabify the given word, whether simplex or complex.

    Yields (syllabification, rules) pairs: first the primary
    syllabification, then alternative ones with certain rules disabled,
    then empty padding pairs.
    """
    word = split(word)
    # detect any non-delimited compounds
    compound = True if re.search(r'-| |\.', word) else False
    # NOTE: this local name shadows the generator itself; from here on
    # ``syllabify`` refers to the selected helper function.
    syllabify = _syllabify_compound if compound else _syllabify
    syll, rules = syllabify(word)
    yield syll, rules
    n = 3
    if 'T4' in rules:
        # variant with rule T4 disabled
        yield syllabify(word, T4=False)
        n -= 1
    if 'e' in rules:
        # variant with rule T1E disabled
        yield syllabify(word, T1E=False)
        n -= 1
    if 'e' in rules and 'T4' in rules:
        # variant with both rules disabled
        yield syllabify(word, T4=False, T1E=False)
        n -= 1
    # yield empty syllabifications and rules
    # NOTE(review): ``n`` is decremented above but never used — the loop
    # always pads with 7 empty pairs. Possibly ``range(n)`` was intended to
    # yield a fixed total number of items; confirm against callers.
    for n in range(7):
        yield '', ''
def get_connected_components(graph):
    """Find all connected components of ``graph``.

    Returns a list of lists, each containing the node ids that form one
    connected component. Returns an empty list for an empty graph.
    """
    components = []
    # Vertices not yet assigned to any component.
    remaining = set(graph.get_all_node_ids())
    frontier = deque()
    while remaining:
        if not frontier:
            # Previous component exhausted: seed a new one with an arbitrary
            # unreached vertex (pop + re-add peeks without removing).
            seed = remaining.pop()
            remaining.add(seed)
            frontier.append(seed)
            current = []
            components.append(current)
        # Traversal over connected vertices (pop() takes the right end).
        while frontier:
            node = frontier.pop()
            if node in remaining:
                current.append(node)
                remaining.remove(node)
            for neighbor in graph.neighbors(node):
                if neighbor in remaining:
                    frontier.append(neighbor)
    return components
def numpy2gdalint(self):
    """Create a dictionary mapping numpy data type names to GDAL type codes.

    The map is computed once and cached on the instance.

    Returns
    -------
    dict
        the type map, keyed by numpy dtype name (e.g. 'uint8')
    """
    # BUG FIX: the original probed the cache with
    # ``hasattr(self, '__numpy2gdalint')`` while assigning
    # ``self.__numpy2gdalint``; inside a class body the assignment is
    # name-mangled to ``_<Class>__numpy2gdalint`` but the hasattr string is
    # not, so the cache was never hit and the map was rebuilt on every call.
    # A single-underscore attribute avoids mangling entirely.
    if not hasattr(self, '_numpy2gdalint_cache'):
        tmap = {}
        for group in ['int', 'uint', 'float', 'complex']:
            for dtype in np.sctypes[group]:
                code = gdal_array.NumericTypeCodeToGDALTypeCode(dtype)
                # Some numpy types have no GDAL counterpart; skip those.
                if code is not None:
                    tmap[dtype().dtype.name] = code
        self._numpy2gdalint_cache = tmap
    return self._numpy2gdalint_cache
def get_unique_filename(filename, new_filename=None, new_extension=None):
    """Generate a new name for a file, keeping its extension.

    Either the new name is derived from the current time
    (hours + minutes + seconds + microseconds), or an explicit new name is
    given and used as-is.

    :type filename: string
    :param filename: name of the original file
    :type new_filename: string
    :param new_filename: (optional) custom new file name; when given, the
        timestamp is not inserted into the new name
    :type new_extension: string
    :param new_extension: (optional) force a new file extension instead of
        the original one; specify it without the leading "dot"
    :rtype: string
    :return: the new file name
    """
    if new_extension:
        extension = new_extension
    else:
        # splitext() keeps the dot, hence [1:] to drop it.
        extension = splitext(filename)[1][1:]
    if new_filename:
        return '%s.%s' % (new_filename, extension)
    now = real_datetime.now()
    # %0Nd zero-pads directly, producing the same output as the original's
    # Python-2-only ``unicode(...).zfill(...)`` calls.
    return '%02d%02d%02d%06d.%s' % (
        now.hour, now.minute, now.second, now.microsecond, extension)
def list_cmd(only_active, only_aliases, verbose):
    """List indices."""
    def _tree_print(tree, ancestry=None, verbose=False, indent=2):
        # ``ancestry`` holds one flag per ancestor level: 1 when that
        # ancestor has further siblings (draw a vertical guide), 0 when it
        # was the last child. It is copied on every recursion, which can be
        # wasteful for very deep dictionaries.
        ancestry = ancestry or []
        for idx, key in enumerate(sorted(tree)):
            last = (len(tree) - 1 == idx)
            prefix = ['│' + ' ' * indent if flag == 1 else ' ' * (indent + 1)
                      for flag in ancestry]
            prefix.append('└──' if last else '├──')
            click.echo(''.join(prefix), nl=False)
            value = tree[key]
            if isinstance(value, dict):
                click.echo(key)
                _tree_print(value, ancestry + [0 if last else 1], verbose)
            else:
                click.echo('{} -> {}'.format(key, value) if verbose else key)

    aliases = current_search.active_aliases if only_active else current_search.aliases
    active_aliases = current_search.active_aliases
    if only_aliases:
        click.echo(json.dumps(list(aliases.keys()), indent=4))
    else:
        # Mark active indices with a ' *' suffix for the printout.
        marked = {}
        for name, value in aliases.items():
            marked[name + (' *' if name in active_aliases else '')] = value
        click.echo(_tree_print(marked, verbose=verbose))
def _timestamp_extractor_cmu(self, staging_audio_basename, str_timestamps_with_sil_conf):
    """Convert CMU Sphinx timing rows into word-timing blocks.

    Parameters
    ----------
    staging_audio_basename : str
        Basename of the audio file being processed; used to key error
        records.
    str_timestamps_with_sil_conf : [[str, str, str, str]]
        Of the form [[word, starting_sec, ending_sec, confidence]].

    Returns
    -------
    timestamps : [[str, float, float]]
    """
    # BUG FIX: the original used ``filter(...)`` and compared the result to
    # the input list. On Python 3 ``filter`` returns an iterator, so the
    # comparison was always unequal (recording spurious errors) and the
    # iterator could only be consumed once. A list restores the intended
    # Python 2 semantics.
    timed_rows = [row for row in str_timestamps_with_sil_conf if len(row) == 4]
    if timed_rows != str_timestamps_with_sil_conf:
        # Some rows were missing timing data — keep the raw input for review.
        self.__errors[(time(), staging_audio_basename)] = str_timestamps_with_sil_conf
    # Drop the confidence column and any rows containing markup characters
    # (e.g. <s>, </s> silence tokens).
    str_timestamps = [row[:-1] for row in timed_rows
                      if not any(ch in {"<", ">", "/"} for ch in ''.join(row))]
    # Strip pronunciation-variant suffixes like "word(2)" from the word.
    return [_WordBlock(word=re.findall(r"^[^\(]+", row[0])[0],
                       start=round(float(row[1]), 2),
                       end=round(float(row[2]), 2))
            for row in str_timestamps]
def dft_preprocess_data(arr, shift=True, axes=None, sign='-', out=None):
    """Pre-process the real-space data before DFT.

    This function multiplies the given data with the separable
    function::

        p(x) = exp(+- 1j * dot(x - x[0], xi[0]))

    where ``x[0]`` and ``xi[0]`` are the minimum coordinates of
    the real-space and reciprocal grids, respectively. The sign of
    the exponent depends on the choice of ``sign``. In discretized
    form, this function becomes an array::

        p[k] = exp(+- 1j * k * s * xi[0])

    If the reciprocal grid is not shifted, i.e. symmetric around 0,
    it is ``xi[0] = pi/s * (-1 + 1/N)``, hence::

        p[k] = exp(-+ 1j * pi * k * (1 - 1/N))

    For a shifted grid, we have :math:``xi[0] = -pi/s``, thus the
    array is given by::

        p[k] = (-1)**k

    Parameters
    ----------
    arr : `array-like`
        Array to be pre-processed. If its data type is a real
        non-floating type, it is converted to 'float64'.
    shift : bool or sequence of bools, optional
        If ``True``, the grid is shifted by half a stride in the negative
        direction. With a sequence, this option is applied separately on
        each axis.
    axes : int or sequence of ints, optional
        Dimensions in which to calculate the reciprocal. The sequence
        must have the same length as ``shift`` if the latter is given
        as a sequence. Default: all axes.
    sign : {'-', '+'}, optional
        Sign of the complex exponent.
    out : `numpy.ndarray`, optional
        Array in which the result is stored. If ``out is arr``,
        an in-place modification is performed. For real data type,
        this is only possible for ``shift=True`` since the factors are
        complex otherwise.

    Returns
    -------
    out : `numpy.ndarray`
        Result of the pre-processing. If ``out`` was given, the returned
        object is a reference to it.

    Notes
    -----
    If ``out`` is not specified, the data type of the returned array
    is the same as that of ``arr`` except when ``arr`` has real data
    type and ``shift`` is not ``True``. In this case, the return type
    is the complex counterpart of ``arr.dtype``.
    """
    arr = np.asarray(arr)
    if not is_numeric_dtype(arr.dtype):
        raise ValueError('array has non-numeric data type {}'
                         ''.format(dtype_repr(arr.dtype)))
    elif is_real_dtype(arr.dtype) and not is_real_floating_dtype(arr.dtype):
        # Integer/boolean input: promote so the multiplications below run
        # in floating point.
        arr = arr.astype('float64')
    if axes is None:
        axes = list(range(arr.ndim))
    else:
        try:
            # Single integer axis.
            axes = [int(axes)]
        except TypeError:
            axes = list(axes)
    shape = arr.shape
    # Normalize ``shift`` to one bool per processed axis.
    shift_list = normalized_scalar_param_list(shift, length=len(axes),
                                              param_conv=bool)
    # Make a copy of arr with correct data type if necessary, or copy values.
    if out is None:
        if is_real_dtype(arr.dtype) and not all(shift_list):
            # Any unshifted axis requires complex factors (see Notes).
            out = np.array(arr, dtype=complex_dtype(arr.dtype), copy=True)
        else:
            out = arr.copy()
    else:
        out[:] = arr
    # NOTE(review): this guard tests the raw ``shift`` argument; a non-empty
    # sequence like [True, False] is truthy, so ``not shift`` is False and a
    # real ``out`` with an unshifted axis would reach the complex
    # multiplication below. Possibly ``not all(shift_list)`` was intended —
    # confirm upstream.
    if is_real_dtype(out.dtype) and not shift:
        raise ValueError('cannot pre-process real input in-place without '
                         'shift')
    if sign == '-':
        imag = -1j
    elif sign == '+':
        imag = 1j
    else:
        raise ValueError("`sign` '{}' not understood".format(sign))

    def _onedim_arr(length, shift):
        # Per-axis factor p[k]; see the docstring formulas.
        if shift:
            # (-1)^indices
            factor = np.ones(length, dtype=out.dtype)
            factor[1::2] = -1
        else:
            factor = np.arange(length, dtype=out.dtype)
            factor *= -imag * np.pi * (1 - 1.0 / length)
            np.exp(factor, out=factor)
        return factor.astype(out.dtype, copy=False)

    onedim_arrs = []
    for axis, shift in zip(axes, shift_list):
        length = shape[axis]
        onedim_arrs.append(_onedim_arr(length, shift))
    # Multiply the separable per-axis factors into ``out`` in place.
    fast_1d_tensor_mult(out, onedim_arrs, axes=axes, out=out)
    return out
def subtract(self, expr, simplify):
    """Return a new expression with ``expr`` removed from this one, if present.

    When ``expr`` is a direct argument it is removed; when it is an
    expression of the same class whose arguments are all contained here,
    those arguments are removed. Returns None when nothing remains, the
    single remaining argument when only one is left, and optionally a
    simplified expression otherwise.
    """
    new_args = self.args
    if expr in self.args:
        new_args = list(self.args)
        new_args.remove(expr)
    elif isinstance(expr, self.__class__):
        if all(arg in self.args for arg in expr.args):
            # NOTE(review): membership here is tested against ``expr``
            # itself (not ``expr.args``) — presumably the class iterates
            # over its args; confirm against the class definition.
            new_args = tuple(arg for arg in self.args if arg not in expr)
    if not new_args:
        return None
    if len(new_args) == 1:
        return new_args[0]
    result = self.__class__(*new_args)
    return result.simplify() if simplify else result
def strip_headers_footers_pagebreaks(docbody, page_break_posns, num_head_lines, num_foot_lines):
    """Remove page-break lines, header lines, and footer lines from the
    document.

    @param docbody: (list) of strings; each string is a line in the
        document. Modified in place.
    @param page_break_posns: (list) of integers; each is the index in
        docbody at which a page break is found. Modified in place (reversed).
    @param num_head_lines: (int) the number of header lines each page in the
        document has.
    @param num_foot_lines: (int) the number of footer lines each page in the
        document has.
    @return: (list) of strings - the document body after the headers,
        footers, and page-break lines have been stripped from the list.
    """
    # FIX: the original used the Python-2-only ``xrange``, which raises
    # NameError on Python 3; ``range`` behaves identically here.
    num_breaks = len(page_break_posns)
    page_lens = []
    # Collect each page's length so we can verify pages are long enough to
    # actually contain the claimed headers and footers.
    for x in range(0, num_breaks):
        if x < num_breaks - 1:
            page_lens.append(page_break_posns[x + 1] - page_break_posns[x])
    page_lens.sort()
    if (len(page_lens) > 0) and (num_head_lines + num_foot_lines + 1 < page_lens[0]):
        # Safe to chop hdrs & ftrs. Work backwards through the document so
        # earlier break positions remain valid as lines are deleted.
        page_break_posns.reverse()
        first = 1
        for i in range(0, len(page_break_posns)):
            # Unless this is the last page break, chop headers of the page
            # that follows this break.
            if not first:
                for dummy in range(1, num_head_lines + 1):
                    docbody[page_break_posns[i] + 1:page_break_posns[i] + 2] = []
            else:
                first = 0
            # Chop page break itself
            docbody[page_break_posns[i]:page_break_posns[i] + 1] = []
            # Chop footers (unless this is the first page break)
            if i != len(page_break_posns) - 1:
                for dummy in range(1, num_foot_lines + 1):
                    docbody[page_break_posns[i] - num_foot_lines:
                            page_break_posns[i] - num_foot_lines + 1] = []
    return docbody
def _setup_profiles(self, conversion_profiles):
    """Add the given conversion profiles, checking for invalid ones.

    A profile is a sequence of format names; every consecutive pair must
    have a registered converter, otherwise the profile is dropped with a
    warning.
    """
    for name, steps in conversion_profiles.items():
        if isinstance(steps, str):
            # A bare string is shorthand for a single-step profile.
            steps = (steps,)
        for src, dst in pair_looper(steps):
            step = (_format(src), _format(dst))
            if step not in self.converters:
                log.warning('Invalid conversion profile %s, unknown step %s'
                            % (repr(name), repr(step)))
                break
        else:
            # No invalid step found — register the profile.
            self.conversion_profiles[name] = steps
def _get_pga_on_rock ( self , C , rup , dists ) :
"""Returns the median PGA on rock , which is a sum of the
magnitude and distance scaling""" | return np . exp ( self . _get_magnitude_scaling_term ( C , rup ) + self . _get_path_scaling ( C , dists , rup . mag ) ) |
def _use_color(msg, ansi_fmt, output_stream):
    """Return ``msg`` colorized or not, depending on configuration and TTY.

    Based on :data:`~exhale.configs.alwaysColorize`: when building on
    readthedocs, or when colorizing is not forced and ``output_stream`` is
    not a TTY (e.g. output redirected to a file), the message is returned
    unchanged; otherwise it is wrapped in the given ANSI format.

    **Parameters**
        ``msg`` (str)
            The message that is going to be printed by the caller of this
            method.
        ``ansi_fmt`` (str)
            The ANSI color format to use when coloring is supposed to
            happen.
        ``output_stream`` (file)
            Assumed to be either ``sys.stdout`` or ``sys.stderr``.

    **Return**
        ``str``
            The message ``msg`` in color, or not.
    """
    skip_color = configs._on_rtd or (not configs.alwaysColorize
                                     and not output_stream.isatty())
    return msg if skip_color else colorize(msg, ansi_fmt)
def sync(self, json_obj=None):
    """Synchronize this transport with the Ariane server transport.

    :param json_obj: optional server payload to sync from; when None, the
        current state is fetched from the server using this transport's id.
    :return: None — ``id``, ``name`` and ``properties`` are updated in place.
    """
    LOGGER.debug("Transport.sync")
    if json_obj is None:
        params = None
        if self.id is not None:
            params = SessionService.complete_transactional_req({'ID': self.id})
        if params is not None:
            # Non-REST drivers carry the operation name inside the
            # properties dict; REST uses an explicit GET path.
            if MappingService.driver_type != DriverFactory.DRIVER_REST:
                params['OPERATION'] = 'getTransport'
                args = {'properties': params}
            else:
                args = {'http_operation': 'GET', 'operation_path': 'get', 'parameters': params}
            response = TransportService.requester.call(args)
            # Non-REST responses are futures that must be resolved first.
            if MappingService.driver_type != DriverFactory.DRIVER_REST:
                response = response.get()
            if response.rc == 0:
                json_obj = response.response_content
            else:
                err_msg = 'Transport.sync - Problem while syncing transport (id: ' + str(self.id) + '). ' \
                          'Reason: ' + str(response.response_content) + ' - ' + \
                          str(response.error_message) + " (" + str(response.rc) + ")"
                LOGGER.warning(err_msg)
                # rc 500 with the overload marker means the server asked us
                # to back off — escalate to the caller.
                if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
                    raise ArianeMappingOverloadError("Transport.sync",
                                                     ArianeMappingOverloadError.ERROR_MSG)
                # traceback.print_stack()
    elif 'transportID' not in json_obj:
        # A payload was supplied but lacks the mandatory id field.
        err_msg = 'Transport.sync - Problem while syncing transport (id: ' + str(self.id) + '). ' \
                  'Reason: inconsistent json_obj' + str(json_obj) + " from : \n"
        LOGGER.warning(err_msg)
        # traceback.print_stack()
    if json_obj is not None:
        self.id = json_obj['transportID']
        self.name = json_obj['transportName']
        # REST payloads carry properties as plain JSON; other drivers need
        # conversion through DriverTools.
        if MappingService.driver_type != DriverFactory.DRIVER_REST:
            if 'transportProperties' in json_obj:
                self.properties = DriverTools.json2properties(json_obj['transportProperties'])
            else:
                self.properties = None
        else:
            self.properties = json_obj['transportProperties'] if 'transportProperties' in json_obj else None
def _createphotoset(self, myset):
    """Create a photo set (album) named ``myset`` on Facebook.

    Returns True on success, False when connecting fails or the album
    creation request does not return an id.
    """
    if not self._connectToFB():
        # BUG FIX: the original referenced an undefined name ``directory``
        # here, raising NameError instead of reporting the failure.
        print("%s - Couldn't connect to fb" % (myset))
        return False
    logger.debug('fb: Creating photo set %s' % (myset))
    resp = self.fb.put_object(USER_ID, "albums", name=myset)
    # FIX: dict.has_key() was removed in Python 3; use the ``in`` operator.
    if 'id' not in resp:
        logger.error("%s - fb: _createphotoset failed to create album", myset)
        return False
    return True
def _generate_iam_role_policy ( self ) :
"""Generate the policy for the IAM Role .
Terraform name : aws _ iam _ role . lambda _ role""" | endpoints = self . config . get ( 'endpoints' )
queue_arns = [ ]
for ep in endpoints :
for qname in endpoints [ ep ] [ 'queues' ] :
qarn = 'arn:aws:sqs:%s:%s:%s' % ( self . aws_region , self . aws_account_id , qname )
if qarn not in queue_arns :
queue_arns . append ( qarn )
pol = { "Version" : "2012-10-17" , "Statement" : [ { "Effect" : "Allow" , "Action" : "logs:CreateLogGroup" , "Resource" : "arn:aws:logs:%s:%s:*" % ( self . aws_region , self . aws_account_id ) } , { "Effect" : "Allow" , "Action" : [ "logs:CreateLogStream" , "logs:PutLogEvents" ] , "Resource" : [ "arn:aws:logs:%s:%s:log-group:%s:*" % ( self . aws_region , self . aws_account_id , '/aws/lambda/%s' % self . resource_name ) ] } , { 'Effect' : 'Allow' , 'Action' : [ 'sqs:ListQueues' ] , 'Resource' : '*' } , { "Effect" : "Allow" , "Action" : [ "sqs:GetQueueUrl" , "sqs:SendMessage" ] , "Resource" : sorted ( queue_arns ) } ] }
self . tf_conf [ 'resource' ] [ 'aws_iam_role_policy' ] [ 'role_policy' ] = { 'name' : self . resource_name , 'role' : '${aws_iam_role.lambda_role.id}' , 'policy' : json . dumps ( pol ) } |
def _decorate_urlconf(urlpatterns, decorator=require_auth, *args, **kwargs):
    """Recursively wrap the callback of every URL pattern with ``decorator``.

    Accepts either a single pattern object or a list/tuple of patterns;
    nested ``url_patterns`` (includes) are processed recursively. Extra
    positional and keyword arguments are forwarded to the decorator.
    """
    if not isinstance(urlpatterns, (list, tuple)):
        # Single pattern object.
        if getattr(urlpatterns, 'callback', None):
            urlpatterns._callback = decorator(urlpatterns.callback, *args, **kwargs)
        return
    for pattern in urlpatterns:
        if getattr(pattern, 'callback', None):
            pattern._callback = decorator(pattern.callback, *args, **kwargs)
        if getattr(pattern, 'url_patterns', []):
            _decorate_urlconf(pattern.url_patterns, decorator, *args, **kwargs)
def memwarp(src_ds, res=None, extent=None, t_srs=None, r=None, oudir=None, dst_ndv=0, verbose=True):
    """Warp a single input Dataset, writing the output to memory.

    Thin wrapper around ``warp`` that fixes the output driver to the GDAL
    Memory driver. (``oudir`` is accepted for interface compatibility but
    unused here, as the output never touches disk.)
    """
    return warp(src_ds, res, extent, t_srs, r,
                driver=iolib.mem_drv, dst_ndv=dst_ndv, verbose=verbose)
def YamlLoader(string):
    """Load an AFF4 object from a serialized YAML representation.

    The YAML payload is expected to carry the target class name
    ("aff4_class"), the attribute triples ("attributes"), the object's URN
    ("_urn") and its age policy ("age_policy").
    """
    representation = yaml.Parse(string)
    result_cls = aff4.FACTORY.AFF4Object(representation["aff4_class"])
    aff4_attributes = {}
    for predicate, values in iteritems(representation["attributes"]):
        attribute = aff4.Attribute.PREDICATES[predicate]
        tmp = aff4_attributes[attribute] = []
        # Each value is stored as (rdfvalue class name, raw value, age);
        # rebuild typed RDF values from that triple.
        for rdfvalue_cls_name, value, age in values:
            rdfvalue_cls = aff4.FACTORY.RDFValue(rdfvalue_cls_name)
            value = rdfvalue_cls(value, age=rdfvalue.RDFDatetime(age))
            tmp.append(value)
    # Ensure the object is dirty so when we save it, it can be written to the data
    # store.
    result = result_cls(urn=representation["_urn"], clone=aff4_attributes, mode="rw",
                        age=representation["age_policy"])
    # Move the cloned attributes into new_attributes so they count as
    # unsaved modifications.
    result.new_attributes, result.synced_attributes = result.synced_attributes, {}
    result._dirty = True
    # pylint: disable=protected-access
    return result
def setCmdline(self, value=1):
    """Set (truthy ``value``) or clear (falsy ``value``) the cmdline flag."""
    # Assign through __dict__ to avoid extra calls to __setattr__.
    if value:
        new_flags = self.flags | _cmdlineFlag
    else:
        new_flags = self.flags & ~_cmdlineFlag
    self.__dict__['flags'] = new_flags
def _save_if_needed(request, response_content):
    """Save data to disk, if requested by the user.

    :param request: Download request
    :type request: DownloadRequest
    :param response_content: content of the download response
    :type response_content: bytes
    """
    if not request.save_response:
        return
    file_path = request.get_file_path()
    create_parent_folder(file_path)
    with open(file_path, 'wb') as out_file:
        out_file.write(response_content)
    LOGGER.debug('Saved data from %s to %s', request.url, file_path)
def createLearningRateScheduler(self, params, optimizer):
    """Create the learning rate scheduler and attach it to the optimizer.

    :param params: configuration dict. Recognized keys:
        - "lr_scheduler": class name from ``torch.optim.lr_scheduler``
          (e.g. "StepLR"); when absent, no scheduler is created.
        - "learning_rate_factor": gamma used when "lr_scheduler" is "StepLR".
        - "lr_scheduler_params": string with a Python dict of constructor
          arguments, required for any other scheduler class.
    :param optimizer: optimizer instance to attach the scheduler to.
    :returns: the scheduler instance, or None when not configured.
    :raises ValueError: if a non-StepLR scheduler is requested without
        "lr_scheduler_params".
    """
    lr_scheduler = params.get("lr_scheduler", None)
    if lr_scheduler is None:
        return None
    if lr_scheduler == "StepLR":
        # Build the kwargs directly instead of formatting and eval'ing a
        # dict string as the original did.
        lr_scheduler_params = {"step_size": 1, "gamma": params["learning_rate_factor"]}
    else:
        params_str = params.get("lr_scheduler_params", None)
        if params_str is None:
            raise ValueError("Missing 'lr_scheduler_params' for {}".format(lr_scheduler))
        # SECURITY: this evaluates arbitrary Python from the configuration.
        # Kept for backward compatibility (e.g. LambdaLR needs a lambda in
        # its params) — only load trusted configuration files.
        lr_scheduler_params = eval(params_str)
    # Look the scheduler class up by name instead of eval'ing a dotted path.
    clazz = getattr(torch.optim.lr_scheduler, lr_scheduler)
    return clazz(optimizer, **lr_scheduler_params)
def do_related(parser, token):
    """Get N related models into a context variable, optionally specifying a
    named related finder.

    **Usage**::

        {% related <limit> [query_type] [app.model, ...] for <object> as <result> %}

    **Parameters**::

        Option         Description
        ``limit``      Number of objects to retrieve.
        ``query_type`` Named finder to resolve the related objects,
                       falls back to ``settings.DEFAULT_RELATED_FINDER``
                       when not specified.
        ``app.model``  List of allowed models, all if omitted.
        ``object``     Object to get the related for.
        ``result``     Store the resulting list in context under given name.

    **Examples**::

        {% related 10 for object as related_list %}
        {% related 10 directly articles.article, galleries.gallery for object as related_list %}
    """
    # parse_related_tag returns (obj_var, count, var_name, mods, finder),
    # which is exactly RelatedNode's argument order.
    return RelatedNode(*parse_related_tag(token.split_contents()))
def _create_m2m_links_step(self, rel_model_name, rel_key, rel_value, relation_name):
    """Link many-to-many models together.

    Syntax:
        And `model` with `field` "`value`" is linked to `other model` in the
        database:

    Example:

    .. code-block:: gherkin

        And article with name "Guidelines" is linked to tags in the database:
            | name   |
            | coding |
            | style  |
    """
    # Fetch the instance on the left-hand side of the relation.
    lookup = {rel_key: rel_value}
    rel_model = get_model(rel_model_name).objects.get(**lookup)
    relation = None
    # Match the m2m relation by field name or its verbose name.
    for m2m in rel_model._meta.many_to_many:
        if relation_name in (m2m.name, m2m.verbose_name):
            relation = getattr(rel_model, m2m.name)
            break
    if not relation:
        # Fall back to a plain attribute lookup (e.g. a reverse relation
        # manager whose name does not appear in _meta.many_to_many).
        try:
            relation = getattr(rel_model, relation_name)
        except AttributeError:
            pass
    assert relation, "%s does not have a many-to-many relation named '%s'" % (
        rel_model._meta.verbose_name.capitalize(),
        relation_name,
    )
    m2m_model = relation.model
    # Each scenario-table row identifies one related object to link.
    for hash_ in self.hashes:
        relation.add(m2m_model.objects.get(**hash_))
def SetDefaultValue(self, scan_object):
    """Set the default (non-match) value.

    Args:
      scan_object: a scan object, either a scan tree sub node (instance of
          PathFilterScanTreeNode) or a string containing a path.

    Raises:
      TypeError: if the scan object is of an unsupported type.
      ValueError: if the default value is already set.
    """
    is_node = isinstance(scan_object, PathFilterScanTreeNode)
    is_string = isinstance(scan_object, py2to3.STRING_TYPES)
    if not (is_node or is_string):
        raise TypeError('Unsupported scan object type.')
    if self.default_value:
        raise ValueError('Default value already set.')
    self.default_value = scan_object
def run_main(args: argparse.Namespace, do_exit=True) -> "int | None":
    """Run all checks and (by default) exit with the aggregate status.

    To extend this tool, call this function with do_exit set to False to get
    the status code returned instead of exiting.

    :param args: parsed command line arguments.
    :param do_exit: when True, terminate the process via ``handler.exit()``.
    :returns: None when ``--init`` generated a config; otherwise the
        aggregate status code (only reached when do_exit is False or
        ``handler.exit()`` returns).
    """
    # FIX: the original annotated the return type as ``None`` although it
    # returns ``handler.status_code`` — the annotation now matches.
    if args.init:
        # Exit after generate instead of starting to lint.
        generate()
        return None
    handler = CheckHandler(file=args.config_file, out_json=args.json, files=args.files)
    for style in get_stylers():
        handler.run_linter(style())
    for linter in get_linters():
        handler.run_linter(linter())
    for security in get_security():
        handler.run_linter(security())
    for tool_cls in get_tools():
        tool = tool_cls()
        # Only run pypi if everything else passed.
        if tool.name == "pypi" and handler.status_code != 0:
            continue
        handler.run_linter(tool)
    if do_exit:
        handler.exit()
    return handler.status_code
def from_path_and_array(cls, path, folder, y, classes=None, val_idxs=None, test_name=None,
                        num_workers=8, tfms=(None, None), bs=64):
    """Read in images from a sub-folder, with labels given as a numpy array.

    Arguments:
        path: root path of the data (used for storing trained models,
            precomputed values, etc.)
        folder: name of the folder in which the training images are
            contained.
        y: numpy array containing target labels ordered by filename.
        bs: batch size
        tfms: transformations (for data augmentation), e.g. the output of
            `tfms_from_model`
        val_idxs: indices of images to be used for validation, e.g. the
            output of `get_cv_idxs`. If None, default arguments to
            get_cv_idxs are used.
        test_name: name of the folder containing test images.
        num_workers: number of workers

    Returns:
        ImageClassifierData
    """
    assert tfms[0] is not None and tfms[1] is not None, \
        "please provide transformations for your train and validation sets"
    assert not os.path.isabs(folder), "folder needs to be a relative path"
    # Prefix each (sorted) filename with the folder so names are relative
    # to ``path``.
    fnames = np.core.defchararray.add(f'{folder}/', sorted(os.listdir(f'{path}{folder}')))
    return cls.from_names_and_array(path, fnames, y, classes, val_idxs, test_name,
                                    num_workers=num_workers, tfms=tfms, bs=bs)
def transform_from_rot_trans(R, t):
    """Build a 4x4 homogeneous transformation matrix from a rotation matrix
    and a translation vector."""
    rotation = R.reshape(3, 3)
    translation = t.reshape(3, 1)
    # [R | t] on top, [0 0 0 1] as the homogeneous bottom row.
    upper = np.hstack([rotation, translation])
    return np.vstack((upper, [0, 0, 0, 1]))
def enrollment_start(self, name, mode=None, pin=None, phone_number=None):
    """Start Client Enrollment. Uses the POST to /enrollments interface.

    :Args:
        * *name*: (str) Client's Name
        * *mode*: (str) DEPRECATED. Presence of PIN is used to determine
          mode (AudioPass vs AudioPIN)
        * *pin*: (str) Client's PIN. 4 digit string
        * *phone_number*: (str) Phone number to call.

    :Returns: (dict) Enrollment record with prompts as described
        `here <https://cloud.knuverse.com/docs/api/#api-Enrollments-Start_enrollment>`_.
    """
    payload = {"name": name}
    if mode:
        print('WARNING: The "mode" parameter for enrollment_start is DEPRECATED and will be ignored. '
              'To avoid incompatibility with a future release please stop providing it.',
              file=sys.stderr)
    if pin:
        payload["pin"] = pin
    if phone_number:
        payload["phone_number"] = phone_number
    response = self._post(url.enrollments, body=payload)
    self._check_response(response, 201)
    return self._create_response(response)
def get_absolute_name(package, relative_name):
    """Join a package name and a relative name.

    Args:
      package: A dotted name, e.g. foo.bar.baz
      relative_name: A dotted name with possibly some leading dots, e.g. ..x.y

    Returns:
      The resolved dotted name: one leading dot anchors at ``package``
      itself and each additional dot goes up one level, matching Python's
      relative-import resolution (foo.bar.baz + ..hello.world ->
      foo.bar.hello.world). The unchanged relative_name is returned when it
      has more leading dots than ``package`` has components.
      NOTE(review): with no leading dot the name is still appended to the
      full package (foo.bar + x.y -> foo.bar.x.y) rather than returned
      unchanged — confirm callers expect this.
    """
    parts = package.split('.') if package else []
    stripped = relative_name.lstrip('.')
    num_dots = len(relative_name) - len(stripped)
    if num_dots > len(parts):
        return relative_name
    # One dot keeps the whole package; each extra dot drops one component.
    resolved = parts[:len(parts) + 1 - num_dots]
    if stripped:
        resolved.append(stripped)
    return '.'.join(resolved)
def __scripts(self, filestem):
    """Generate the required RSD Prolog driver scripts in ``self.tmpdir``.

    Three scripts are written: feature construction, saving (weka/rsd
    export) and subgroup discovery. Each loads the corresponding RSD
    module, reads ``filestem`` and writes its results.
    """
    script_construct = open('%s/%s' % (self.tmpdir, RSD.CONSTRUCT), 'w')
    script_save = open('%s/%s' % (self.tmpdir, RSD.SAVE), 'w')
    script_subgroups = open('%s/%s' % (self.tmpdir, RSD.SUBGROUPS), 'w')
    # Permit the owner to execute and read this script.
    # NOTE(review): the files are chmod'ed read+exec while still open for
    # writing; the already-open handles keep working on POSIX, but the
    # resulting files are not owner-writable afterwards.
    for fn in RSD.SCRIPTS:
        os.chmod('%s/%s' % (self.tmpdir, fn), S_IREAD | S_IEXEC)
    # Writes one line of script (returns a per-file line writer).
    new_script = lambda script: lambda x: script.write(x + '\n')
    # 'Construction' script
    w = new_script(script_construct)
    w(':- initialization(main).')
    w('main :-')
    w('[featurize],')
    w('r(%s),' % filestem)
    w('w.')
    script_construct.close()
    # 'Saving' script
    w = new_script(script_save)
    w(':- initialization(main).')
    w('main :-')
    w('[process],')
    w('r(%s),' % filestem)
    w('w,')
    w('w(weka, %s),' % filestem)
    w('w(rsd, %s).' % filestem)
    script_save.close()
    # 'Subgroups' script
    w = new_script(script_subgroups)
    w(':- initialization(main).')
    w('main :-')
    w('[rules],')
    w('r(%s),' % filestem)
    w('i,')
    w('w.')
    script_subgroups.close()
def get_rules():
    """Return all enabled rules, sorted by priority.

    :rtype: [Rule]
    """
    paths = []
    # Collect *.py rule files from every import path, sorted per directory.
    for import_path in get_rules_import_paths():
        paths.extend(sorted(import_path.glob('*.py')))
    return sorted(get_loaded_rules(paths), key=lambda rule: rule.priority)
def resourceprep(string, allow_unassigned=False):
    """Process the given `string` using the Resourceprep (`RFC 6122`_)
    profile. In the error cases defined in `RFC 3454`_ (stringprep), a
    :class:`ValueError` is raised.
    """
    chars = list(string)
    _resourceprep_do_mapping(chars)
    do_normalization(chars)
    # Tables of code points prohibited in resource parts.
    prohibited_tables = (
        stringprep.in_table_c12,
        stringprep.in_table_c21,
        stringprep.in_table_c22,
        stringprep.in_table_c3,
        stringprep.in_table_c4,
        stringprep.in_table_c5,
        stringprep.in_table_c6,
        stringprep.in_table_c7,
        stringprep.in_table_c8,
        stringprep.in_table_c9,
    )
    check_prohibited_output(chars, prohibited_tables)
    check_bidi(chars)
    if not allow_unassigned:
        check_unassigned(chars, (stringprep.in_table_a1,))
    return "".join(chars)
def fetch(self, refund_id, data=None, **kwargs):
    """Fetch the refund object for the given refund id.

    Args:
        refund_id: Refund id for which the refund has to be retrieved
        data: (optional) extra request parameters

    Returns:
        Refund dict for the given refund id
    """
    # FIX: the original used a mutable default argument (``data={}``),
    # which is shared across calls; normalize None to a fresh dict instead.
    if data is None:
        data = {}
    return super(Refund, self).fetch(refund_id, data, **kwargs)
def chain_split(*splits: Iterable[Callable[..., Any]]) -> Callable[[BaseChain], Iterable[BaseChain]]:  # noqa: E501
    """Construct and execute multiple concurrent forks of the chain.

    Any number of forks may be executed. For each fork, provide an iterable
    of commands.

    Returns the resulting chain objects for each fork.

    .. code-block:: python

        chain_a, chain_b = build(
            mining_chain,
            chain_split(
                (mine_block(extra_data=b'chain-a'), mine_block()),
                (mine_block(extra_data=b'chain-b'), mine_block(), mine_block()),
    """
    if not splits:
        raise ValidationError("Cannot use `chain_split` without providing at least one split")

    @functools.wraps(chain_split)
    @to_tuple
    def _chain_split(chain: BaseChain) -> Iterable[BaseChain]:
        # Apply each fork's command sequence to its own copy of the chain.
        for fork_commands in splits:
            yield build(chain, *fork_commands)

    return _chain_split
def getActive(cls):
    """Return either the currently active StreamingContext (i.e., if there
    is a context started but not stopped) or None.

    Raises:
        Exception: if the JVM-side active context is not the one backing
            the cached Python context (inconsistent state).
    """
    activePythonContext = cls._activeContext
    if activePythonContext is not None:
        # Verify that the currently running Java StreamingContext is active
        # and is the same one backing the supposedly active Python context.
        activePythonContextJavaId = activePythonContext._jssc.ssc().hashCode()
        activeJvmContextOption = activePythonContext._jvm.StreamingContext.getActive()
        if activeJvmContextOption.isEmpty():
            # Nothing active on the JVM side: drop the stale Python handle.
            cls._activeContext = None
        elif activeJvmContextOption.get().hashCode() != activePythonContextJavaId:
            cls._activeContext = None
            # BUG FIX: error message said "action Python StreamingContext";
            # corrected to "active".
            raise Exception("JVM's active JavaStreamingContext is not the JavaStreamingContext "
                            "backing the active Python StreamingContext. This is unexpected.")
    return cls._activeContext
def fetch_ludic_numbers(limit: int) -> list:
    """Generate and return all ludic numbers up to and including *limit*.

    The ludic sieve repeatedly takes the next surviving number ``n`` and
    deletes every ``n``-th element of the remaining candidates.

    Examples:
        fetch_ludic_numbers(10)
        returns [1, 2, 3, 5, 7]
        fetch_ludic_numbers(25)
        returns [1, 2, 3, 5, 7, 11, 13, 17, 23, 25]
        fetch_ludic_numbers(45)
        returns [1, 2, 3, 5, 7, 11, 13, 17, 23, 25, 29, 37, 41, 43]
    """
    survivors = list(range(1, limit + 1))
    position = 1  # position 0 holds 1, which never eliminates anything
    while position != len(survivors):
        step = survivors[position]
        # Delete every ``step``-th surviving element after the current one.
        # Each deletion shifts the tail left, hence advancing by step - 1.
        cursor = position + step
        while cursor < len(survivors):
            del survivors[cursor]
            cursor += step - 1
        position += 1
    return survivors
def get_rotation_parameters(phases, magnitudes):
    """Simulate one step of phase/magnitude unification rotations.

    Given lists of phases and magnitudes of the same length :math:`N`,
    with :math:`N = 2^n` for some positive integer :math:`n`, find the
    rotation angles required for one step of phase and magnitude
    unification.

    :param list phases: real valued phases from :math:`-\\pi` to :math:`\\pi`.
    :param list magnitudes: positive, real valued magnitudes such that the
        sum of the squares is :math:`2^{-m}` for some nonnegative integer m.
    :return: A tuple t of four lists such that
        - t[0] are the z-rotations needed to unify adjacent pairs of phases
        - t[1] are the y-rotations needed to unify adjacent pairs of magnitudes
        - t[2] are the updated phases after these rotations are applied
        - t[3] are the updated magnitudes after these rotations are applied
    :rtype: tuple
    """
    z_thetas = []        # controlled z-rotation angles (phase unification)
    y_thetas = []        # controlled y-rotation angles (probability unification)
    new_phases = []      # phases after the rotations are applied
    new_magnitudes = []  # magnitudes after the rotations are applied
    for even in range(0, len(phases), 2):
        # z rotation angle unifying this adjacent pair of phases.
        phase_a = phases[even]
        phase_b = phases[even + 1]
        z_thetas.append(phase_a - phase_b)
        # The unified phase is the pair's mean.
        new_phases.append((phase_a + phase_b) / 2.)
        # y rotation angle unifying this adjacent pair of magnitudes.
        mag_a = magnitudes[even]
        mag_b = magnitudes[even + 1]
        if mag_a == 0 and mag_b == 0:
            # Degenerate pair: no rotation needed (avoids division by zero).
            y_thetas.append(0)
        else:
            y_thetas.append(2 * np.arcsin((mag_a - mag_b) / (np.sqrt(2 * (mag_a ** 2 + mag_b ** 2)))))
        # Unified magnitude preserves the pair's total probability.
        new_magnitudes.append(np.sqrt((mag_a ** 2 + mag_b ** 2) / 2.))
    return z_thetas, y_thetas, new_phases, new_magnitudes
def _datetime ( self ) :
"""Conversion of the Date object into a : py : class : ` datetime . datetime ` .
The resulting object is a timezone - naive instance in the REF _ SCALE time - scale""" | if 'dt' not in self . _cache . keys ( ) :
self . _cache [ 'dt' ] = self . MJD_T0 + timedelta ( days = self . _d , seconds = self . _s )
return self . _cache [ 'dt' ] |
def _set_attributes(self):
    """Recursively transform the config dictionary into instance attributes
    to allow easy dot attribute access instead of dictionary access.
    """
    # ``obj`` turns the (possibly nested) config dict into an
    # attribute-accessible object.
    config = obj(self._config_dict)
    # FIX: iterate keys only -- the dict *values* were bound but never used
    # (the converted ``config`` attributes supersede them).
    for key in self._config_dict:
        setattr(self, key, getattr(config, key))
def aggregate(self, region_agg=None, sector_agg=None, region_names=None,
              sector_names=None, inplace=True, pre_aggregation=False):
    """Aggregate the IO system.

    Aggregation can be given as a vector (use pymrio.build_agg_vec) or as an
    aggregation matrix.  In the case of a vector this must be of length
    self.get_regions() / self.get_sectors() respectively, with the new
    position as integer or a string of the new name.  In the case of
    strings the final output order can be specified in region_dict and
    sector_dict in the format {str1: int_pos, str2: int_pos, ...}.

    If the sector/region concordance is given as a matrix or numerical
    vector, generic names will be used for the new sectors/regions.  One
    can define specific names by giving the aggregation as a string vector.

    Parameters
    ----------
    region_agg : list, array, string or pandas.DataFrame, optional
        The aggregation vector or matrix for the regions (np.ndarray or
        list).  If string: aggregates to one total region named by the
        given string.  If DataFrame: must contain the columns 'original'
        and 'aggregated' (the output of country_converter.agg_conc).
    sector_agg : list, array or string, optional
        The aggregation vector or matrix for the sectors (np.ndarray or
        list).  If string: aggregates to one total sector named by the
        given string.
    region_names : list, optional
        Names for the aggregated regions.
        If concordance matrix - in order of rows in this matrix.
        If concordance vector - in order of num. values in this vector.
        If string based - same order as the passed strings.
        Not considered when passing a DataFrame - in that case give the
        names in the column 'aggregated'.
    sector_names : list, optional
        Names for the aggregated sectors.  Same behaviour as
        'region_names'.
    inplace : boolean, optional
        If True, aggregates the IOSystem in place (default), otherwise
        aggregation happens on a copy of the IOSystem.  Regardless of the
        setting, the IOSystem is returned to allow for chained operations.
    pre_aggregation : boolean, optional
        NOTE(review): accepted but not used anywhere in this body --
        confirm whether it can be removed or should change behaviour.

    Returns
    -------
    IOSystem
        Aggregated IOSystem (a copy if inplace is False)

    Raises
    ------
    AggregationError
        If the system cannot be reset to flows before aggregating.
    ValueError
        If a concordance/name input has the wrong columns or shape.
    """
    # Development note: This can not be put in the CoreSystem b/c
    # then the recalculation of the extension coefficients would not
    # work.
    if not inplace:
        self = self.copy()
    try:
        # Aggregation is only defined on the flow (non-coefficient) form.
        self.reset_to_flows()
    except ResetError:
        raise AggregationError("System under-defined for aggregation - "
                               "do a 'calc_all' before aggregation")
    # Allow a single name to be passed as a bare string.
    if type(region_names) is str:
        region_names = [region_names]
    if type(sector_names) is str:
        sector_names = [sector_names]
    # DataFrame input: map every current region/sector through the
    # original->aggregated concordance, filling gaps with a placeholder.
    if type(region_agg) is pd.DataFrame:
        if (('original' not in region_agg.columns) or
                ('aggregated' not in region_agg.columns)):
            raise ValueError('Passed DataFrame must include the columns '
                             '"original" and "aggregated"')
        region_agg = (region_agg
                      .set_index('original')
                      .reindex(self.get_regions(),
                               fill_value=MISSING_AGG_ENTRY['region'])
                      .loc[:, 'aggregated'])
    if type(sector_agg) is pd.DataFrame:
        if (('original' not in sector_agg.columns) or
                ('aggregated' not in sector_agg.columns)):
            raise ValueError('Passed DataFrame must include the columns '
                             '"original" and "aggregated"')
        sector_agg = (sector_agg
                      .set_index('original')
                      .reindex(self.get_sectors(),
                               fill_value=MISSING_AGG_ENTRY['sector'])
                      .loc[:, 'aggregated'])
    # fill the aggregation matrix with 1:1 mapping
    # if input not given and get names if not given
    _same_regions = False
    _same_sectors = False
    if region_agg is None:
        region_agg = self.get_regions()
        region_names = region_names or self.get_regions()
        _same_regions = True
    if sector_agg is None:
        sector_agg = self.get_sectors()
        sector_names = sector_names or self.get_sectors()
        _same_sectors = True
    # capture total aggregation case
    if type(region_agg) is str:
        region_agg = [region_agg] * len(self.get_regions())
    if type(sector_agg) is str:
        sector_agg = [sector_agg] * len(self.get_sectors())
    # Normalize both aggregations to concordance matrices.
    if ioutil.is_vector(region_agg):
        region_conc = ioutil.build_agg_matrix(region_agg)
    else:
        region_conc = region_agg
    if ioutil.is_vector(sector_agg):
        sector_conc = ioutil.build_agg_matrix(sector_agg)
    else:
        sector_conc = sector_agg
    # build the new names
    if (not _same_regions) and (not region_names):
        if isinstance(region_agg, np.ndarray):
            region_agg = region_agg.flatten().tolist()
        if type(region_agg[0]) is str:
            region_names = ioutil.unique_element(region_agg)
        else:
            # rows in the concordance matrix give the new number of
            # regions
            region_names = [GENERIC_NAMES['region'] + str(nr)
                            for nr in range(region_conc.shape[0])]
    if (not _same_sectors) and (not sector_names):
        if isinstance(sector_agg, np.ndarray):
            sector_agg = (sector_agg.flatten().tolist())
        if type(sector_agg[0]) is str:
            sector_names = ioutil.unique_element(sector_agg)
        else:
            sector_names = [GENERIC_NAMES['sector'] + str(nr)
                            for nr in range(sector_conc.shape[0])]
    # Assert right shapes
    if not sector_conc.shape[1] == len(self.get_sectors()):
        raise ValueError('Sector aggregation does not '
                         'correspond to the number of sectors.')
    if not region_conc.shape[1] == len(self.get_regions()):
        raise ValueError('Region aggregation does not '
                         'correspond to the number of regions.')
    if not len(sector_names) == sector_conc.shape[0]:
        raise ValueError('New sector names do not '
                         'match sector aggregation.')
    if not len(region_names) == region_conc.shape[0]:
        raise ValueError('New region names do not '
                         'match region aggregation.')
    # build pandas.MultiIndex for the aggregated system
    _reg_list_for_sec = [[r] * sector_conc.shape[0] for r in region_names]
    _reg_list_for_sec = [entry for entrylist in _reg_list_for_sec
                         for entry in entrylist]
    _reg_list_for_Ycat = [[r] * len(self.get_Y_categories())
                          for r in region_names]
    _reg_list_for_Ycat = [entry for entrylist in _reg_list_for_Ycat
                          for entry in entrylist]
    _sec_list = list(sector_names) * region_conc.shape[0]
    _Ycat_list = list(self.get_Y_categories()) * region_conc.shape[0]
    mi_reg_sec = pd.MultiIndex.from_arrays([_reg_list_for_sec, _sec_list],
                                           names=['region', 'sector'])
    mi_reg_Ycat = pd.MultiIndex.from_arrays([_reg_list_for_Ycat, _Ycat_list],
                                            names=['region', 'category'])
    # arrange the whole concordance matrix
    # (Kronecker product: region aggregation combined with sector aggregation)
    conc = np.kron(region_conc, sector_conc)
    conc_y = np.kron(region_conc, np.eye(len(self.get_Y_categories())))
    # Aggregate
    self.meta._add_modify('Aggregate final demand y')
    self.Y = pd.DataFrame(data=conc.dot(self.Y).dot(conc_y.T),
                          index=mi_reg_sec, columns=mi_reg_Ycat, )
    self.meta._add_modify('Aggregate transaction matrix Z')
    self.Z = pd.DataFrame(data=conc.dot(self.Z).dot(conc.T),
                          index=mi_reg_sec, columns=mi_reg_sec, )
    if self.x is not None:
        # x could also be obtained from the
        # aggregated Z, but aggregate if available
        self.x = pd.DataFrame(data=conc.dot(self.x),
                              index=mi_reg_sec, columns=self.x.columns, )
        self.meta._add_modify('Aggregate industry output x')
    else:
        self.x = calc_x(self.Z, self.Y)
    if self.population is not None:
        self.meta._add_modify('Aggregate population vector')
        self.population = pd.DataFrame(
            data=region_conc.dot(self.population.T).T,
            columns=region_names, index=self.population.index, )
    for extension in self.get_extensions(data=True):
        self.meta._add_modify('Aggregate extensions...')
        extension.reset_to_flows()
        st_redo_unit = False
        for ik_name, ik_df in zip(
                extension.get_DataFrame(data=False, with_unit=False),
                extension.get_DataFrame(data=True, with_unit=False)):
            # Without unit - this is reset afterwards if necessary
            if ik_df.index.names == ['region', 'sector'] == ik_df.columns.names:
                # Fully disaggregated extensions - aggregate both axes
                # (this is the case if the extension shows the flows from
                # pda to cba)
                extension.__dict__[ik_name] = pd.DataFrame(
                    data=conc.dot(ik_df).dot(conc.T))
                # next step must be done afterwards due to unknown reasons
                extension.__dict__[ik_name].columns = mi_reg_sec
                extension.__dict__[ik_name].index = mi_reg_sec
                st_redo_unit = True
            elif (ik_df.index.names == ['region', 'sector'] and
                  ik_df.columns.names == ['region', 'category']):
                # Fully disaggregated final demand satellite account.
                # That's not implemented yet - but aggregation is in place
                extension.__dict__[ik_name] = pd.DataFrame(
                    data=conc.dot(ik_df).dot(conc_y.T))
                # next step must be done afterwards due to unknown reasons
                extension.__dict__[ik_name].columns = mi_reg_Ycat
                extension.__dict__[ik_name].index = mi_reg_sec
            elif ik_df.columns.names == ['region', 'category']:
                # Satellite account connected to final demand (e.g. FY)
                extension.__dict__[ik_name] = pd.DataFrame(
                    data=ik_df.dot(conc_y.T))
                # next step must be done afterwards due to unknown reasons
                extension.__dict__[ik_name].columns = mi_reg_Ycat
                extension.__dict__[ik_name].index = ik_df.index
            else:
                # Standard case - aggregated columns, keep stressor rows
                extension.__dict__[ik_name] = pd.DataFrame(
                    data=ik_df.dot(conc.T))
                # next step must be done afterwards due to unknown reasons
                extension.__dict__[ik_name].columns = mi_reg_sec
                extension.__dict__[ik_name].index = ik_df.index
        if st_redo_unit:
            try:
                # Reuse the first unit entry for the whole aggregated index.
                _value = extension.unit.iloc[0].tolist()[0]
                extension.unit = pd.DataFrame(index=mi_reg_sec,
                                              columns=extension.unit.columns,
                                              data=_value)
            except AttributeError:
                # could fail if no unit available
                extension.unit = None
    self.calc_extensions()
    return self
def token_range(self, first_token, last_token, include_extra=False):
    """Yield all tokens in order from *first_token* through and including
    *last_token*.  When *include_extra* is true, non-coding tokens such as
    tokenize.NL and tokenize.COMMENT are yielded as well.
    """
    for position in xrange(first_token.index, last_token.index + 1):
        candidate = self._tokens[position]
        if include_extra or not is_non_coding_token(candidate.type):
            yield candidate
async def forget(request):
    """Forget the userid for a request.

    Args:
        request: aiohttp Request object

    Raises:
        RuntimeError: the auth middleware is not installed
    """
    policy = request.get(POLICY_KEY)
    if policy is None:
        raise RuntimeError('auth_middleware not installed')
    return await policy.forget(request)
def alleleupdater(self, sample, gene, targetallele):
    """Update the file of alleles if the new allele passes length and
    identity checks.

    :param sample: sample object
    :param gene: name of gene of interest
    :param targetallele: closest allele in database
    :return: a tuple of three empty strings (placeholder return values)
    """
    from Bio.Seq import Seq
    from Bio.Alphabet import IUPAC
    from Bio.SeqRecord import SeqRecord
    # As there is some discrepancy with the capitalisation of terms, make sure it is consistent
    analysistype = 'rMLST' if self.analysistype.lower() == 'rmlst' else 'MLST'
    # Set the directory containing the profile and alleles
    alleledir = self.referencefilepath + analysistype + '/local' if self.pipeline else self.referencefilepath
    allelefile = glob('{}/{}.fa'.format(alleledir, gene))[0]
    # Create a string to store the last local allele number
    nextallele = str()
    # Check the local allele file to see if this allele has already been recorded
    for record in SeqIO.parse(allelefile, 'fasta'):
        # String match of the sequence to the sequence of the alleles - if they match then set the name of
        # the matching allele to this local allele
        if sample[self.analysistype].queryseq[gene] == str(record.seq):
            # NOTE(review): ``allelenumber`` is assigned but never used -- the
            # early return discards it.
            allelenumber = record.id.split('_')[-1]
            # Return allelenumber, percent identity (100.0%), hsp.score - 100?
            return '', '', ''
        # Record the allele number + 1; following the last record, this number will represent
        # the next allele number
        nextallele = int(record.id.split('_')[-1]) + 1
    # Translate the nucleotide sequence to determine if there are any internal stop codons
    dnaseq = Seq(sample[self.analysistype].queryseq[gene], IUPAC.unambiguous_dna)
    protseq = str(dnaseq.translate())
    # There should be only one stop codon per sequence. In sequences with more than one stop codon,
    # this additional stop codon must be internal
    internalstop = True if protseq.count('*') > 1 else False
    if not internalstop:
        # If the alignment length is less than the subject length, then the query sequence is truncated.
        # This may not be an issue, but it must still be flagged
        truncated = True if sample[self.analysistype].alignmentlength[gene] < sample[self.analysistype].subjectlength[gene] else False
        if not truncated:
            print('full length', sample.name, gene, nextallele, targetallele, alleledir, allelefile, protseq)
            # The header will be e.g. >BACT00001_100000
            definitionline = '{}_{} {} NT from allele {}### no internal stop codons'.format(gene, nextallele, sample[self.analysistype].mismatches[gene], sample[self.analysistype].closealleles[gene])
            # Create a sequence record using BioPython
            fasta = SeqRecord(dnaseq,
                              # Without this, the header will be improperly formatted
                              description='',
                              # Use >definitionline as the header
                              id=definitionline)
            print(definitionline)
            # NOTE(review): writing of the new allele to the supplemental and
            # local files is currently disabled.
            # with open(sample[self.analysistype].supplementalalleles) as supplemental:
            #     # Use the SeqIO module to properly format the new sequence record
            #     SeqIO.write(fasta, supplemental, 'fasta')
            # with open(allelefile) as localalleles:
            #     # Use the SeqIO module to properly format the new sequence record
            #     SeqIO.write(fasta, localalleles, 'fasta')
        else:
            print('truncated', sample.name, gene, nextallele, targetallele, alleledir, allelefile, protseq)
    else:
        print('internal stop', sample.name, gene, nextallele, targetallele, alleledir, allelefile, protseq)
    return '', '', ''
def which(exe_name):
    """Locate a program file in the user's path.

    @param exe_name: name of the executable file (bare name searched on
        PATH, or an explicit path checked directly).
    @return: ``None`` if the executable has not been found in the user's
        path, or the path for the executable file.
    """
    def is_exe(file_path_name):
        # A candidate must be a regular file with the execute bit set.
        return os.path.isfile(file_path_name) and os.access(file_path_name, os.X_OK)

    is_platform_windows = (platform.system() == 'Windows')
    fpath, _fname = os.path.split(exe_name)
    if fpath:
        # An explicit path was given: check that file directly.
        if is_exe(exe_name):
            return exe_name
    else:
        for path in os.environ['PATH'].split(os.pathsep):
            exe_file_path_name = os.path.join(path, exe_name)
            if is_exe(exe_file_path_name):
                return exe_file_path_name
            if is_platform_windows:
                # On Windows also try the implicit executable suffixes.
                windows_exe_file_path_name = '%s.exe' % exe_file_path_name
                if is_exe(windows_exe_file_path_name):
                    return windows_exe_file_path_name
                # BUG FIX: the original built this candidate with '.exe' as
                # well, so '.com' programs could never be found.
                windows_com_file_path_name = '%s.com' % exe_file_path_name
                if is_exe(windows_com_file_path_name):
                    return windows_com_file_path_name
    return None
def get_request(cls):
    """Get the HTTPRequest object from thread storage or from a callee by
    searching each frame in the call stack.

    Returns None when no request can be found or the stack cannot be
    inspected.
    """
    request = cls.get_global('request')
    if request:
        return request
    try:
        stack = inspect.stack()
    except IndexError:
        # in some cases this may return an index error
        # (pyc files dont match py files for example)
        return
    for frame, _, _, _, _, _ in stack:
        if 'request' in frame.f_locals:
            if isinstance(frame.f_locals['request'], HttpRequest):
                request = frame.f_locals['request']
                # Cache the found request so later lookups skip the stack walk.
                cls.set_global('request', request)
                return request
def get_token(authed_user: hug.directives.user):
    """Return the username and API key for the authenticated user.

    :param authed_user: username resolved by the hug authentication directive
    :return: dict with ``user`` and ``api_key`` keys, or an ``error`` entry
        when the user record does not exist
    """
    user_model = Query()
    # BUG FIX: the original indexed ``[0]`` on the search result before
    # checking it, so a missing user raised IndexError and the error branch
    # was unreachable.  Guard the lookup first.
    matches = db.search(user_model.username == authed_user)
    if matches:
        user = matches[0]
        out = {'user': user['username'], 'api_key': user['api_key']}
    else:
        # this should never happen
        out = {'error': 'User {0} does not exist'.format(authed_user)}
    return out
def setAutoResizeToContents(self, state):
    """Set whether or not this widget should automatically resize its
    height based on its contents.

    :param state: <bool>
    """
    self._autoResizeToContents = state
    if not state:
        # Restore the default on-demand scrollbar behaviour.
        self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
        self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
        return
    # Auto-resizing: size to fit now and hide both scrollbars.
    self.resizeToContents()
    self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
    self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)
def p_break_statement_1(self, p):
    """break_statement : BREAK SEMI
                       | BREAK AUTOSEMI"""
    # NOTE: the docstring above is the ply grammar production -- do not edit.
    brk = self.asttypes.Break()
    p[0] = brk
    brk.setpos(p)
def get_params(self, tid):
    """Return the parameters found in the stack when the hooked function
    was last called by this thread.

    @type tid: int
    @param tid: Thread global ID.
    @rtype: tuple(arg, arg, arg...)
    @return: Tuple of arguments.
    @raise IndexError: the hooked function already returned for this thread.
    """
    try:
        # The most recent call's parameters sit at the top of the stack.
        params = self.get_params_stack(tid)[-1]
    except IndexError:
        raise IndexError(
            "Hooked function called from thread %d already returned" % tid)
    return params
def add_task_to_job(self, job_or_job_name, task_command, task_name=None, **kwargs):
    """Add a task to a job owned by the Dagobah instance.

    Accepts either a Job instance or a job name; raises DagobahError when
    the job cannot be resolved or its graph is currently immutable.
    """
    job = (job_or_job_name
           if isinstance(job_or_job_name, Job)
           else self.get_job(job_or_job_name))
    if not job:
        raise DagobahError('job %s does not exist' % job_or_job_name)
    logger.debug('Adding task with command {0} to job {1}'.format(task_command, job.name))
    if not job.state.allow_change_graph:
        raise DagobahError("job's graph is immutable in its current " +
                           "state: %s" % job.state.status)
    job.add_task(task_command, task_name, **kwargs)
    # Persist the modified job graph.
    job.commit()
def _expect(self, expected, times=50):
    """Find the `expected` line within `times` trials.

    Args:
        expected str: the expected string
        times int: maximum number of read attempts

    Raises:
        Exception: the expected line was not seen within the given trials.
    """
    # NOTE: Python 2 print statements; this module targets Python 2.
    print '[%s] Expecting [%s]' % (self.port, expected)
    # Empty reads are tolerated a limited number of times before giving up.
    retry_times = 10
    while times > 0 and retry_times > 0:
        line = self._readline()
        print '[%s] Got line [%s]' % (self.port, line)
        if line == expected:
            print '[%s] Expected [%s]' % (self.port, expected)
            return
        if not line:
            # Nothing read: back off briefly and burn one retry.
            retry_times -= 1
            time.sleep(0.1)
        times -= 1
    raise Exception('failed to find expected string[%s]' % expected)
def video_search(self, entitiy_type, query, **kwargs):
    """Search the TV schedule database.

    Where ``entitiy_type`` is a comma separated list of:

    ``movie``
        Movie
    ``tvseries``
        TV series
    ``episode``
        Episode titles
    ``onetimeonly``
        TV programs
    ``credit``
        People working in TV or movies

    NOTE(review): the parameter name ``entitiy_type`` is misspelled but is
    kept for backward compatibility with keyword callers.
    """
    return self.make_request('video', entitiy_type, query, kwargs)
def build_score_request(scoring_system, request, context_name=None, rev_id=None, model_name=None):
    """Build an :class:`ores.ScoreRequest` from information contained in a
    request.

    :Parameters:
        scoring_system : :class:`ores.ScoringSystem`
            A scoring system to build the request with
        request : :class:`flask.Request`
            A web request to extract information from
        context_name : `str`
            The name of the context to perform scoring in
        rev_id : int
            The revision ID to score.  Note that multiple IDs can be
            provided in `request.args`
        model_name : `str`
            The name of the model to score.  Note that multiple models can
            be provided in `request.args`
    """
    rev_ids = parse_rev_ids(request, rev_id)
    model_names = parse_model_names(request, model_name)
    # Boolean flags are encoded by the mere presence of the query parameter.
    precache = 'precache' in request.args
    include_features = 'features' in request.args
    injection_caches = parse_injection(request, rev_id)
    model_info = parse_model_info(request)
    # With no explicit models, default to every model the context offers.
    if context_name and context_name in scoring_system and not model_names:
        model_names = scoring_system[context_name].keys()
    # WMF specific solution: prefer the proxied client IP header when set.
    if request.headers.get('X-Client-IP') is None:
        ip = request.remote_addr.strip()
    else:
        ip = request.headers['X-Client-IP'].strip()
    return ScoreRequest(context_name, rev_ids, model_names, precache=precache, include_features=include_features, injection_caches=injection_caches, model_info=model_info, ip=ip)
def _disconnect ( self , error ) :
"done" | if self . _on_disconnect :
self . _on_disconnect ( str ( error ) )
if self . _sender :
self . _sender . connectionLost ( Failure ( error ) )
self . _when_done . fire ( Failure ( error ) ) |
def flip_whole(self, tour):
    """Test flipping all contigs at the same time to see if the score
    improves; keep the flip only when it does.
    """
    baseline, = self.evaluate_tour_Q(tour)
    # Negate every sign at once and re-score.
    self.signs = -self.signs
    flipped, = self.evaluate_tour_Q(tour)
    if flipped > baseline:
        tag = ACCEPT
    else:
        # No improvement: undo the flip.
        self.signs = -self.signs
        tag = REJECT
    self.flip_log("FLIPWHOLE", baseline, flipped, tag)
    return tag
def login(session, username, password, class_name=None):
    """Login on coursera.org with the given credentials.

    This adds the following cookies to the session:
        sessionid, maestro_login, maestro_login_flag

    Raises ClassNotFound when `class_name` is given but does not exist, and
    AuthenticationFailed when the credentials are rejected.
    """
    logging.debug('Initiating login.')
    try:
        session.cookies.clear('.coursera.org')
        logging.debug('Cleared .coursera.org cookies.')
    except KeyError:
        logging.debug('There were no .coursera.org cookies to be cleared.')
    # Hit class url
    if class_name is not None:
        class_url = CLASS_URL.format(class_name=class_name)
        # NOTE(review): this uses the module-level ``requests`` rather than
        # the passed-in ``session`` -- presumably deliberate (no session
        # cookies wanted here), but worth confirming.
        r = requests.get(class_url, allow_redirects=False)
        try:
            r.raise_for_status()
        except requests.exceptions.HTTPError as e:
            logging.error(e)
            raise ClassNotFound(class_name)
    headers = prepare_auth_headers(session, include_cauth=False)
    data = {'email': username, 'password': password, 'webrequest': 'true'}
    # Auth API V3
    r = session.post(AUTH_URL_V3, data=data, headers=headers, allow_redirects=False)
    try:
        r.raise_for_status()
        # Somehow the order of the cookie parameters is important
        # for coursera!!!
        v = session.cookies.pop('CAUTH')
        session.cookies.set('CAUTH', v)
    except requests.exceptions.HTTPError as e:
        raise AuthenticationFailed('Cannot login on coursera.org: %s' % e)
    logging.info('Logged in on coursera.org.')
def reset(self, fid=0):
    """Reset the object's resources to its initialized state.

    :param fid: the id of a sub-fitter
    :return: True when the sub-fitter merely had its loop flag cleared,
        otherwise the result of the proxy's reset.
    """
    self._checkid(fid)
    entry = self._fitids[fid]
    entry["solved"] = False
    entry["haserr"] = False
    if entry["looped"]:
        # A looping fit only needs its flag cleared; no proxy call.
        entry["looped"] = False
        return True
    return self._fitproxy.reset(fid)
def request_login(blink, url, username, password, is_retry=False):
    """Perform a login request against the Blink servers.

    :param blink: Blink instance.
    :param url: Login url.
    :param username: Blink username.
    :param password: Blink password.
    :param is_retry: Is this part of a re-authorization attempt?
    """
    login_headers = {
        'Host': DEFAULT_URL,
        'Content-Type': 'application/json',
    }
    payload = dumps({
        'email': username,
        'password': password,
        'client_specifier': 'iPhone 9.2 | 2.2 | 222',
    })
    return http_req(blink, url=url, headers=login_headers, data=payload,
                    json_resp=False, reqtype='post', is_retry=is_retry)
def route(app_or_blueprint, context=default_context, **kwargs):
    """Attach a transmute route to the given app or blueprint."""
    def decorator(fn):
        fn = describe(**kwargs)(fn)
        transmute_func = TransmuteFunction(fn)
        routes, handler = create_routes_and_handler(transmute_func, context)
        for path in routes:
            # push swagger info: lazily attach a spec to the target and
            # record this function's metadata on it.
            if not hasattr(app_or_blueprint, SWAGGER_ATTR_NAME):
                setattr(app_or_blueprint, SWAGGER_ATTR_NAME, SwaggerSpec())
            getattr(app_or_blueprint, SWAGGER_ATTR_NAME).add_func(transmute_func, context)
            app_or_blueprint.route(path, methods=transmute_func.methods)(handler)
        return handler
    return decorator
def find_out_pattern(self, pattern):
    """Read the program's output file and return the lines containing
    *pattern* (stripped).

    NOTE(review): despite the original docstring mentioning standard
    error, this searches the *stdout* file.

    EG. prog_obj.FindErrPattern("Update of mySQL failed")

    :param pattern: substring to look for in each line
    :return: list of matching lines; empty when none match or the file
        does not exist
    """
    # Resolve the stdout path relative to the working directory if set.
    if self.wdir != '':
        stdout = "%s/%s" % (self.wdir, self.stdout)
    else:
        stdout = self.stdout
    response = []
    # First we check if the file we want to read does exist
    if os.path.exists(stdout):
        with open_(stdout, 'r') as f:
            for line in f:
                if pattern in line:
                    response.append(line.strip())
    else:
        # FILE DOESN'T EXIST
        debug.log("Error: The stdout file %s does not exist!" % (stdout))
    return response
def _open(self):
    """Open the compressed log file.

    Returns
    -------
    file : file-like object of the resulting stream.
    """
    # The gzip module supports directly setting encoding as of Python 3.3.
    # pylint: disable=unexpected-keyword-arg
    if not py2to3.PY_3:
        return gzip.open(self.baseFilename, self.mode)
    return gzip.open(self.baseFilename, mode=self.mode, encoding=self.encoding)
def get_vnetwork_portgroups_input_datacenter ( self , ** kwargs ) :
"""Auto Generated Code""" | config = ET . Element ( "config" )
get_vnetwork_portgroups = ET . Element ( "get_vnetwork_portgroups" )
config = get_vnetwork_portgroups
input = ET . SubElement ( get_vnetwork_portgroups , "input" )
datacenter = ET . SubElement ( input , "datacenter" )
datacenter . text = kwargs . pop ( 'datacenter' )
callback = kwargs . pop ( 'callback' , self . _callback )
return callback ( config ) |
def spark_shape(points, shapes, fill=None, color='blue', width=5, yindex=0, heights=None):
    """Draw a sparkline-style row of shapes between consecutive points.

    Parameters
    ----------
    points : array-like
        Boundaries of the shapes; one more entry than ``shapes``.
    shapes : array-like
        Shape type for each interval; ``None`` entries are skipped.
    fill : array-like, optional
        Per-shape fill flags; filled shapes use ``color``, others white.

    Returns
    -------
    Chart
    """
    assert len(points) == len(shapes) + 1
    # Invisible anchor trace spanning the full x-range at the given y.
    data = [{'marker': {'color': 'white'},
             'x': [points[0], points[-1]],
             'y': [yindex, yindex]}]
    fill = [False] * len(shapes) if fill is None else fill
    heights = [0.4] * len(shapes) if heights is None else heights
    lays = []
    for idx, (shape_type, half_height) in enumerate(zip(shapes, heights)):
        if shape_type is None:
            continue
        fillcolor = color if fill[idx] else 'white'
        lays.append(dict(
            type=shape_type,
            x0=points[idx],
            x1=points[idx + 1],
            y0=yindex - half_height,
            y1=yindex + half_height,
            xref='x',
            yref='y',
            fillcolor=fillcolor,
            line=dict(color=color, width=width),
        ))
    return Chart(data=data, layout=dict(shapes=lays))
def is_generic_union(type_: Type) -> bool:
    """Determine whether a type is a ``Union[...]``.

    How to do this varies for different Python versions, due to the typing
    library not having a stable API.  This function smooths over the
    differences.

    Args:
        type_: The type to check.

    Returns:
        True iff it's a ``Union[...something...]``.

    Raises:
        RuntimeError: when running on an unrecognized typing implementation.
    """
    if hasattr(typing, '_GenericAlias'):
        # Python 3.7+
        return (isinstance(type_, typing._GenericAlias)  # type: ignore
                and type_.__origin__ is Union)
    if hasattr(typing, '_Union'):
        # Python 3.6
        return isinstance(type_, typing._Union)  # type: ignore
    if hasattr(typing, 'UnionMeta'):
        # Python 3.5 and earlier (?)
        return isinstance(type_, typing.UnionMeta)  # type: ignore
    # BUG FIX: this raise was previously unreachable (both branches of the
    # if/else returned); unknown typing implementations now fail with the
    # intended RuntimeError instead of an AttributeError on typing.UnionMeta.
    raise RuntimeError('Could not determine whether type is a Union. Is this'
                       ' a YAtiML-supported Python version?')
def get_attrs(self):
    """Get the global attributes from the underlying data set.

    Returns a read-only mapping from attribute name to value, preserving
    the order reported by ``ncattrs()`` (netCDF4-style dataset assumed --
    TODO confirm).
    """
    return FrozenOrderedDict((a, getattr(self.ds, a)) for a in self.ds.ncattrs())
def to_XML(self, xml_declaration=True, xmlns=True):
    """Dump object fields to an XML-formatted string.

    The 'xml_declaration' switch enables printing of a leading standard
    XML line containing XML version and encoding. The 'xmlns' switch
    enables printing of qualified XMLNS prefixes.

    :param xml_declaration: if ``True`` (default) prints a leading XML
        declaration line
    :type xml_declaration: bool
    :param xmlns: if ``True`` (default) prints full XMLNS prefixes
    :type xmlns: bool
    :returns: an XML-formatted string
    """
    root = self._to_DOM()
    if xmlns:
        xmlutils.annotate_with_XMLNS(root,
                                     NO2INDEX_XMLNS_PREFIX,
                                     NO2INDEX_XMLNS_URL)
    return xmlutils.DOM_node_to_XML(root, xml_declaration)
def console_to_str(data):
    # type: (bytes) -> Text
    """Return a string, safe for output, of subprocess output.

    We assume the data is in the locale preferred encoding.
    If it won't decode properly, we warn the user but decode as
    best we can.

    We also ensure that the output can be safely written to
    standard output without encoding errors.
    """
    # First, get the encoding we assume. This is the preferred
    # encoding for the locale, unless that is not found, or
    # it is ASCII, in which case assume UTF-8
    encoding = locale.getpreferredencoding()
    if (not encoding) or codecs.lookup(encoding).name == "ascii":
        encoding = "utf-8"
    # Now try to decode the data - if we fail, warn the user and
    # decode with replacement.
    try:
        decoded_data = data.decode(encoding)
    except UnicodeDecodeError:
        logger.warning(
            "Subprocess output does not appear to be encoded as %s",
            encoding,
        )
        # `backslashreplace_decode` is a file-level error handler defined
        # elsewhere in this module.
        decoded_data = data.decode(encoding, errors=backslashreplace_decode)
    # Make sure we can print the output, by encoding it to the output
    # encoding with replacement of unencodable characters, and then
    # decoding again.
    # We use stderr's encoding because it's less likely to be
    # redirected and if we don't find an encoding we skip this
    # step (on the assumption that output is wrapped by something
    # that won't fail).
    # The double getattr is to deal with the possibility that we're
    # being called in a situation where sys.__stderr__ doesn't exist,
    # or doesn't have an encoding attribute. Neither of these cases
    # should occur in normal pip use, but there's no harm in checking
    # in case people use pip in (unsupported) unusual situations.
    output_encoding = getattr(getattr(sys, "__stderr__", None), "encoding", None)
    if output_encoding:
        output_encoded = decoded_data.encode(output_encoding, errors="backslashreplace")
        decoded_data = output_encoded.decode(output_encoding)
    return decoded_data
def _PrintAnalysisReportsDetails(self, storage_reader):
    """Prints the details of the analysis reports.

    Writes one table per stored report to the output writer, or a
    placeholder message when the store holds none.

    Args:
      storage_reader (StorageReader): storage reader.
    """
    if not storage_reader.HasAnalysisReports():
        self._output_writer.Write('No analysis reports stored.\n\n')
        return
    for report_index, report in enumerate(storage_reader.GetAnalysisReports()):
        title = 'Analysis report: {0:d}'.format(report_index)
        table_view = views.ViewsFactory.GetTableView(
            self._views_format_type, title=title)
        table_view.AddRow(['String', report.GetString()])
        table_view.Write(self._output_writer)
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False, limit=None,
                           fill_value=None, coerce=False, downcast=None):
    """fillna but using the interpolate machinery.

    Fills missing values in this block along *axis* with the directional
    *method* (e.g. 'pad'/'backfill') and returns the resulting block(s)
    as a list.

    :param method: fill direction passed to ``missing.interpolate_2d``.
    :param axis: axis along which to fill.
    :param inplace: if True, operate on this block's values directly.
    :param limit: maximum number of consecutive NaNs to fill.
    :param fill_value: value used by the interpolation machinery.
    :param coerce: if True and the block cannot hold NA, skip filling
        entirely and return the block (or a copy) unchanged.
    :param downcast: downcast spec forwarded to ``_maybe_downcast``.
    :return: list of blocks.
    """
    inplace = validate_bool_kwarg(inplace, 'inplace')
    # if we are coercing, then don't force the conversion
    # if the block can't hold the type
    if coerce:
        if not self._can_hold_na:
            if inplace:
                return [self]
            else:
                return [self.copy()]
    values = self.values if inplace else self.values.copy()
    # Coerce fill_value into something this block's dtype can hold.
    values, fill_value = self._try_coerce_args(values, fill_value)
    values = missing.interpolate_2d(values, method=method, axis=axis,
                                    limit=limit, fill_value=fill_value,
                                    dtype=self.dtype)
    values = self._try_coerce_result(values)
    blocks = [self.make_block_same_class(values, ndim=self.ndim)]
    return self._maybe_downcast(blocks, downcast)
def import_from_dict(session, data, sync=None):
    """Import databases and druid clusters from a dictionary.

    :param session: database session used for the imports and final commit.
    :param data: dict expected to contain ``DATABASES_KEY`` and/or
        ``DRUID_CLUSTERS_KEY`` lists; anything else is logged and ignored.
    :param sync: optional list of sync flags forwarded to the per-object
        importers; defaults to an empty list.
    """
    # Fix: avoid the mutable default argument `sync=[]` (shared across calls);
    # normalize None to a fresh list instead.
    if sync is None:
        sync = []
    if isinstance(data, dict):
        logging.info('Importing %d %s',
                     len(data.get(DATABASES_KEY, [])),
                     DATABASES_KEY)
        for database in data.get(DATABASES_KEY, []):
            Database.import_from_dict(session, database, sync=sync)
        logging.info('Importing %d %s',
                     len(data.get(DRUID_CLUSTERS_KEY, [])),
                     DRUID_CLUSTERS_KEY)
        for datasource in data.get(DRUID_CLUSTERS_KEY, []):
            DruidCluster.import_from_dict(session, datasource, sync=sync)
        # Commit only once, after all objects have been imported.
        session.commit()
    else:
        logging.info('Supplied object is not a dictionary.')
def ParseTextToDicts(self, *args, **kwargs):
    """Calls ParseText and turns the result into a list of dicts.

    List items are dicts of rows; each dict key is a column header and
    the value is the column value.

    Args:
      text: (str), Text to parse with embedded newlines.
      eof: (boolean), Set to False if we are parsing only part of the file.
        Suppresses triggering EOF state.

    Raises:
      TextFSMError: An error occurred within the FSM.

    Returns:
      List of dicts.
    """
    rows = self.ParseText(*args, **kwargs)
    return [dict(zip(self.header, row)) for row in rows]
def hash_file(filename):
    """Hash a file using the same method the registry uses (currently SHA-256).

    The file is read in 8 KiB chunks so arbitrarily large files can be
    hashed without loading them fully into memory.

    :param filename: Name of file to hash
    :type filename: str
    :rtype: str
    :returns: Hex-encoded hash of file's content (prefixed by ``sha256:``)
    """
    digest = hashlib.sha256()
    with open(filename, 'rb') as stream:
        while True:
            chunk = stream.read(8192)
            if not chunk:
                break
            digest.update(chunk)
    return 'sha256:' + digest.hexdigest()
def build_ports_dict(nsg, direction_key, ip_protocol):
    """Build entire ports array filled with True (Allow), False (Deny) and
    None (default - Deny) based on the provided Network Security Group
    object, direction and protocol.

    :param nsg: NSG resource dict with ['properties']['securityRules'].
    :param direction_key: traffic direction to match against each rule.
    :param ip_protocol: 'TCP', 'UDP' or '*' (both).
    :return: dict mapping port number -> bool (True = allowed); ports with
        no matching rule are simply absent (platform default deny).
    """
    rules = nsg['properties']['securityRules']
    # Lower priority number wins on Azure, so sort ascending and let the
    # first decision recorded for a port stand.
    rules = sorted(rules, key=lambda k: k['properties']['priority'])
    ports = {}
    for rule in rules:
        # Skip rules with different direction
        if not StringUtils.equal(direction_key, rule['properties']['direction']):
            continue
        # Check the protocol: possible values are 'TCP', 'UDP', '*' (both)
        # Skip only if rule and ip_protocol are 'TCP'/'UDP' pair.
        protocol = rule['properties']['protocol']
        if not StringUtils.equal(protocol, "*") and not StringUtils.equal(ip_protocol, "*") and not StringUtils.equal(protocol, ip_protocol):
            continue
        IsAllowed = StringUtils.equal(rule['properties']['access'], 'allow')
        ports_set = PortsRangeHelper.get_ports_set_from_rule(rule)
        for p in ports_set:
            # Only the highest-priority (first seen) rule per port counts.
            if p not in ports:
                ports[p] = IsAllowed
    return ports
def _sample_item ( self , ** kwargs ) :
"""Sample an item from the pool according to the instrumental
distribution""" | loc = np . random . choice ( self . _n_items , p = self . _inst_pmf )
weight = ( 1 / self . _n_items ) / self . _inst_pmf [ loc ]
return loc , weight , { } |
def set_mlag_id(self, name, value=None, default=False, disable=False):
    """Configures the interface mlag value for the specified interface.

    Args:
        name (str): The interface to configure. Valid values for the
            name arg include Port-Channel*
        value (str): The mlag identifier to configure on the interface
        default (bool): Configures the interface mlag value using the
            default keyword
        disable (bool): Negates the interface mlag value using the
            no keyword

    Returns:
        bool: Returns True if the commands complete successfully
    """
    command = self.command_builder('mlag', value=value,
                                   default=default, disable=disable)
    return self.configure_interface(name, command)
def decorate_HTTP_verb_method(method):
    """Prepare and Post-Process HTTP VERB method for BigIP-RESTServer request.

    This function decorates all of the HTTP VERB methods in the
    iControlRESTSession class. It provides the core logic for this module.
    If necessary it validates and assembles a uri from parts with a call to
    `generate_bigip_uri`.

    Then it:
    1. pre-logs the details of the request
    2. submits the request
    3. logs the response, included expected status codes
    4. raises exceptions for unexpected status codes. (i.e. not doc'd as
       BigIP RESTServer codes.)
    """
    @functools.wraps(method)
    def wrapper(self, RIC_base_uri, **kwargs):
        # Pop URI-construction arguments out of kwargs so they are not
        # forwarded to the wrapped HTTP call.
        partition = kwargs.pop('partition', '')
        sub_path = kwargs.pop('subPath', '')
        suffix = kwargs.pop('suffix', '')
        identifier, kwargs = _unique_resource_identifier_from_kwargs(**kwargs)
        uri_as_parts = kwargs.pop('uri_as_parts', False)
        transform_name = kwargs.pop('transform_name', False)
        transform_subpath = kwargs.pop('transform_subpath', False)
        if uri_as_parts:
            # Caller supplied URI components; assemble and validate them.
            REST_uri = generate_bigip_uri(RIC_base_uri, partition, identifier,
                                          sub_path, suffix,
                                          transform_name=transform_name,
                                          transform_subpath=transform_subpath,
                                          **kwargs)
        else:
            REST_uri = RIC_base_uri
        pre_message = "%s WITH uri: %s AND suffix: %s AND kwargs: %s" % (
            method.__name__, REST_uri, suffix, kwargs)
        logger = logging.getLogger(__name__)
        logger.debug(pre_message)
        response = method(self, REST_uri, **kwargs)
        post_message = "RESPONSE::STATUS: %s Content-Type: %s Content-Encoding:" \
                       " %s\nText: %r" % (response.status_code,
                                          response.headers.get('Content-Type', None),
                                          response.headers.get('Content-Encoding', None),
                                          response.text)
        logger.debug(post_message)
        # 200-206 are treated as success; anything else raises.
        if response.status_code not in range(200, 207):
            error_message = '%s Unexpected Error: %s for uri: %s\nText: %r' % (
                response.status_code, response.reason, response.url, response.text)
            raise iControlUnexpectedHTTPError(error_message, response=response)
        return response
    return wrapper
def pull_log_dump(self, project_name, logstore_name, from_time, to_time, file_path,
                  batch_size=None, compress=None, encodings=None, shard_list=None,
                  no_escape=None):
    """Dump all logs separately, line by line, into file_path; the time
    parameters are log received time on server side.

    :type project_name: string
    :param project_name: the Project name

    :type logstore_name: string
    :param logstore_name: the logstore name

    :type from_time: string/int
    :param from_time: cursor value, could be begin, timestamp or readable
        time like "%Y-%m-%d %H:%M:%S<time_zone>" e.g. "2018-01-02 12:12:10+8:00";
        also supports human readable strings, e.g. "1 hour ago", "now",
        "yesterday 0:0:0"

    :type to_time: string/int
    :param to_time: cursor value, same formats as from_time

    :type file_path: string
    :param file_path: file path with {} for shard id, e.g. "/data/dump_{}.data";
        {} will be replaced with each partition

    :type batch_size: int
    :param batch_size: batch size to fetch the data in each iteration,
        by default 500

    :type compress: bool
    :param compress: if use compression, by default True

    :type encodings: string list
    :param encodings: encodings like ["utf8", "latin1"] used to dump the logs
        in json format to file; default is ["utf8"]

    :type shard_list: string
    :param shard_list: shard number list, comma separated or ranges: 1,20,31-40

    :type no_escape: bool
    :param no_escape: if True, don't escape non-ANSI characters
        (default is to escape)

    :return: LogResponse {"total_count": 30, "files": {'file_path_1': 10,
        "file_path_2": 20}})

    :raise: LogException
    """
    # Normalize the "{}" placeholder to "{0}" and make sure the path has a
    # shard placeholder at all, so each shard gets its own output file.
    file_path = file_path.replace("{}", "{0}")
    if "{0}" not in file_path:
        file_path += "{0}"
    # NOTE(review): this resolves to the module-level `pull_log_dump` helper
    # imported elsewhere in this file (not a recursive call) -- confirm.
    return pull_log_dump(self, project_name, logstore_name, from_time, to_time,
                         file_path, batch_size=batch_size, compress=compress,
                         encodings=encodings, shard_list=shard_list,
                         no_escape=no_escape)
def argvquote(arg, force=False):
    """Return *arg* quoted so that CommandLineToArgvW on Windows will
    reproduce the argument string unchanged.

    This is the same thing Popen does when supplied with a list of
    arguments. Arguments in a command line should be separated by spaces;
    this function does not add those spaces. The implementation follows
    the suggestions outlined here:
    https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/

    :param arg: the argument string to quote
    :param force: quote even when the argument contains no special chars
    :return: the (possibly) quoted argument
    """
    needs_quoting = force or len(arg) == 0 or any(c in arg for c in ' \t\n\v"')
    if not needs_quoting:
        return arg
    pending_backslashes = 0
    quoted = '"'
    for ch in arg:
        if ch == "\\":
            # Defer backslashes until we know what follows them.
            pending_backslashes += 1
            continue
        if ch == '"':
            # Double every pending backslash, then escape the quote itself.
            quoted += (pending_backslashes * 2 + 1) * '\\'
        else:
            # Backslashes are literal when they don't precede a quote.
            quoted += pending_backslashes * '\\'
        pending_backslashes = 0
        quoted += ch
    # Double any trailing backslashes so the closing quote we append is
    # interpreted as a metacharacter.
    quoted += pending_backslashes * 2 * '\\' + '"'
    return quoted
def get_service_location_info(self, service_location_id):
    """Request service location info.

    Parameters
    ----------
    service_location_id : int

    Returns
    -------
    dict
        Parsed JSON body of the API response.

    Raises
    ------
    requests.HTTPError
        If the API responds with a non-success status code.
    """
    # NOTE(review): assumes `urljoin` here is the project's own multi-segment
    # helper, not urllib.parse.urljoin (which accepts only two URL arguments
    # and would treat "info" as the allow_fragments flag) -- confirm.
    url = urljoin(URLS['servicelocation'], service_location_id, "info")
    headers = {"Authorization": "Bearer {}".format(self.access_token)}
    r = requests.get(url, headers=headers)
    r.raise_for_status()
    return r.json()
def _query_account_key(cli_ctx, account_name):
    """Query the storage account key. This is used when the customer doesn't
    offer account key but name.

    :param cli_ctx: Azure CLI context object.
    :param account_name: name of the storage account to resolve.
    :return: the first account key value.
    """
    rg, scf = _query_account_rg(cli_ctx, account_name)
    # Probe which SDK model shape is available: older SDKs expose a
    # StorageAccountKeys model with .key1 / .key2 attributes, newer ones
    # return a StorageAccountListKeysResult with a .keys list.
    t_storage_account_keys = get_sdk(cli_ctx, CUSTOM_MGMT_STORAGE, 'models.storage_account_keys#StorageAccountKeys')
    if t_storage_account_keys:
        return scf.storage_accounts.list_keys(rg, account_name).key1
    # of type: models.storage_account_list_keys_result#StorageAccountListKeysResult
    return scf.storage_accounts.list_keys(rg, account_name).keys[0].value
async def sunionstore(self, dest, keys, *args):
    """Store the union of sets specified by ``keys`` into a new
    set named ``dest``. Returns the number of keys in the new set.
    """
    all_keys = list_or_args(keys, args)
    return await self.execute_command('SUNIONSTORE', dest, *all_keys)
def set(self, k, v, obj='override'):
    """Record value *v* for option *k* in the scope named by *obj*.

    obj is a Sheet instance, or a Sheet [sub]class. obj="override" means
    override all; obj="default" means last resort.
    """
    scopes = self.setdefault(k, dict())
    scopes[self.objname(obj)] = v
    return v
def sanitize_http_wsgi_env(client, event):
    """Sanitizes WSGI environment variables.

    :param client: an ElasticAPM client
    :param event: a transaction or error event
    :return: The modified event
    """
    try:
        request = event["context"]["request"]
        request["env"] = varmap(_sanitize, request["env"])
    except (KeyError, TypeError):
        # Event has no request env to sanitize; leave it untouched.
        pass
    return event
def draw(self):
    """Draws the feature correlation to dependent variable, called from fit.

    Renders a horizontal bar per feature score and labels each bar with
    its feature name; returns the matplotlib axes for chaining.
    """
    positions = np.arange(self.scores_.shape[0]) + 0.5
    self.ax.barh(positions, self.scores_)
    # Align the tick labels with the bar centers.
    self.ax.set_yticks(positions)
    self.ax.set_yticklabels(self.features_)
    return self.ax
def get(self, url):
    """Lookup the given url and return an entry if found, else None.

    The returned entry is a dict with metadata about the content and a
    'content' key whose value is a file path.

    :param url: the url to look up
    :return: the metadata entry for *url*
    """
    self._load(url)
    self._flush()
    entry = self.metadata[url]
    return entry
def create_widget(self):
    """Create the underlying widget.

    Instantiates the Icon control with the current context and the
    declaration's style.
    """
    declaration = self.declaration
    self.widget = Icon(self.get_context(), None, declaration.style)
def cache_parameter_group_present(name, region=None, key=None, keyid=None, profile=None, **args):
    '''
    Ensure cache parameter group exists.

    name
        A name for the cache parameter group.

    CacheParameterGroupName
        A name for the cache parameter group.
        Note: In general this parameter is not needed, as 'name' is used if
        it's not provided.

    CacheParameterGroupFamily
        The name of the cache parameter group family that the cache parameter
        group can be used with. Valid values are:
        - memcached1.4
        - redis2.6
        - redis2.8
        - redis3.2

    Description
        A description for the cache parameter group.

    ParameterNameValues
        A newly created CacheParameterGroup is an exact duplicate of the
        default parameter group of the requested CacheParameterGroupFamily.
        This option provides a way to fine-tune these settings. It is
        formatted as a [list] of {dicts}, each composed of a parameter name
        and a value.

        .. code-block:: yaml

            ParameterNameValues:
            - ParameterName: timeout
              # Amazon requires ALL VALUES to be strings...
              ParameterValue: "30"
            - ParameterName: appendonly
              # The YAML parser will turn a bare `yes` into a bool, which
              # Amazon will then throw on...
              ParameterValue: "yes"

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.
    '''
    ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
    # Drop salt-internal (underscore-prefixed) kwargs before forwarding.
    args = dict([(k, v) for k, v in args.items() if not k.startswith('_')])
    # ParameterNameValues is applied after creation, not passed to create.
    tunables = args.pop('ParameterNameValues', None)
    current = __salt__['boto3_elasticache.describe_cache_parameter_groups'](name, region=region, key=key, keyid=keyid, profile=profile)
    if not current:
        if __opts__['test']:
            ret['comment'] = 'Cache parameter group `{}` would be created.'.format(name)
            ret['result'] = None
            return ret
        created = __salt__['boto3_elasticache.create_cache_parameter_group'](name, region=region, key=key, keyid=keyid, profile=profile, **args)
        if not created:
            ret['result'] = False
            ret['comment'] = 'Failed to create cache parameter group `{}`.'.format(name)
            return ret
        new = __salt__['boto3_elasticache.describe_cache_parameter_groups'](name, region=region, key=key, keyid=keyid, profile=profile)
        new = new[0]
        # Set any custom cache parameters requested...
        if tunables:
            kwargs = {'ParameterNameValues': tunables}
            updated = __salt__['boto3_elasticache.modify_cache_parameter_groups'](name, region=region, key=key, keyid=keyid, profile=profile, **kwargs)
            if not updated:
                ret['result'] = False
                ret['comment'] = 'Failed to update new cache parameter group `{}`.'.format(name)
                return ret
        # Report user-set parameters in the 'new' state.
        kwargs = {'Source': 'user'}
        new['ParameterNameValues'] = __salt__['boto3_elasticache.describe_cache_parameters'](name, region=region, key=key, keyid=keyid, profile=profile, **kwargs)
        ret['comment'] = 'Cache parameter group `{}` was created.'.format(name)
        ret['changes']['old'] = None
        ret['changes']['new'] = new
    else:
        if not tunables:
            ret['comment'] = 'Cache parameter group `{}` exists.'.format(name)
        else:
            oldvals = []
            newvals = []
            curr_params = __salt__['boto3_elasticache.describe_cache_parameters'](name, region=region, key=key, keyid=keyid, profile=profile)
            # Note that the items under CacheNodeTypeSpecificParameters are
            # never modifiable, so we ignore them completely.
            curr_kvs = {p['ParameterName']: p['ParameterValue'] for p in curr_params['Parameters']}
            req_kvs = {p['ParameterName']: p['ParameterValue'] for p in tunables}
            # Collect only parameters that actually differ from the current value.
            for pname, pval in req_kvs.items():
                if pname in curr_kvs and pval != curr_kvs[pname]:
                    oldvals += [{'ParameterName': pname, 'ParameterValue': curr_kvs[pname]}]
                    newvals += [{'ParameterName': pname, 'ParameterValue': pval}]
            if newvals:
                if __opts__['test']:
                    ret['comment'] = 'Cache parameter group `{}` would be updated.'.format(name)
                    ret['result'] = None
                    ret['changes']['old'] = current[0]
                    ret['changes']['old']['ParameterNameValues'] = oldvals
                    # deepcopy AFTER oldvals is attached, then swap in newvals.
                    ret['changes']['new'] = deepcopy(current[0])
                    ret['changes']['new']['ParameterNameValues'] = newvals
                    return ret
                kwargs = {'ParameterNameValues': newvals}
                if not __salt__['boto3_elasticache.modify_cache_parameter_groups'](name, region=region, key=key, keyid=keyid, profile=profile, **kwargs):
                    ret['result'] = False
                    ret['comment'] = 'Failed to update cache parameter group `{}`.'.format(name)
                    return ret
                ret['changes']['old'] = current[0]
                ret['changes']['old']['ParameterNameValues'] = oldvals
                ret['changes']['new'] = deepcopy(current[0])
                ret['changes']['new']['ParameterNameValues'] = newvals
            else:
                ret['comment'] = 'Cache parameter group `{}` is in the desired state.'.format(name)
    return ret
def delete(self, callback=None, errback=None):
    """Delete the Network and all associated addresses.

    :param callback: optional callable passed through to the REST layer,
        invoked on success
    :param errback: optional callable passed through to the REST layer,
        invoked on error
    :return: result of the underlying REST delete call
    """
    return self._rest.delete(self.id, callback=callback, errback=errback)
def _create(self, **kwargs):
    """wrapped by `create` -- override that in subclasses to customize

    Creates the resource on the device by POSTing to the container's URI
    and returns a new instance built from the response.

    :raises URICreationCollision: if this resource already has a URI
        (it was already created or loaded).
    """
    if 'uri' in self._meta_data:
        error = "There was an attempt to assign a new uri to this " "resource, the _meta_data['uri'] is %s and it should" " not be changed." % (self._meta_data['uri'])
        raise URICreationCollision(error)
    # Validate and normalize caller-supplied parameters before the request.
    self._check_exclusive_parameters(**kwargs)
    requests_params = self._handle_requests_params(kwargs)
    self._minimum_one_is_missing(**kwargs)
    self._check_create_parameters(**kwargs)
    kwargs = self._check_for_python_keywords(kwargs)
    # Reduce boolean pairs as specified by the meta_data entry below
    for key1, key2 in self._meta_data['reduction_forcing_pairs']:
        kwargs = self._reduce_boolean_pair(kwargs, key1, key2)
    # Make convenience variable with short names for this method.
    _create_uri = self._meta_data['container']._meta_data['uri']
    session = self._meta_data['bigip']._meta_data['icr_session']
    kwargs = self._prepare_request_json(kwargs)
    # Invoke the REST operation on the device.
    response = session.post(_create_uri, json=kwargs, **requests_params)
    # Make new instance of self
    result = self._produce_instance(response)
    return result
def stem(self, word):
    """Stem a Dutch word and return the stemmed form.

    Implements the Snowball Dutch stemming algorithm: suffix removal in
    four steps over the standard R1/R2 regions, with 'y'/'i' consonant
    marking and undoubling of 'kk'/'dd'/'tt' and long vowels.

    :param word: The word that is stemmed.
    :type word: str or unicode
    :return: The stemmed form.
    :rtype: unicode
    """
    word = word.lower()
    step2_success = False
    # Vowel accents are removed.
    word = (word.replace("\xE4", "a").replace("\xE1", "a")
                .replace("\xEB", "e").replace("\xE9", "e")
                .replace("\xED", "i").replace("\xEF", "i")
                .replace("\xF6", "o").replace("\xF3", "o")
                .replace("\xFC", "u").replace("\xFA", "u"))
    # An initial 'y', a 'y' after a vowel,
    # and an 'i' between vowels is put into upper case.
    # As from now these are treated as consonants.
    if word.startswith("y"):
        word = "".join(("Y", word[1:]))
    for i in range(1, len(word)):
        if word[i - 1] in self.__vowels and word[i] == "y":
            word = "".join((word[:i], "Y", word[i + 1:]))
    for i in range(1, len(word) - 1):
        if (word[i - 1] in self.__vowels and word[i] == "i" and
                word[i + 1] in self.__vowels):
            word = "".join((word[:i], "I", word[i + 1:]))
    r1, r2 = self._r1r2_standard(word, self.__vowels)
    # R1 is adjusted so that the region before it
    # contains at least 3 letters.
    for i in range(1, len(word)):
        if word[i] not in self.__vowels and word[i - 1] in self.__vowels:
            if len(word[:i + 1]) < 3 and len(word[:i + 1]) > 0:
                r1 = word[3:]
            elif len(word[:i + 1]) == 0:
                return word
            break
    # STEP 1: remove plural/genitive suffixes ("heden", "ene"/"en", "se"/"s").
    for suffix in self.__step1_suffixes:
        if r1.endswith(suffix):
            if suffix == "heden":
                word = "".join((word[:-5], "heid"))
                r1 = "".join((r1[:-5], "heid"))
                if r2.endswith("heden"):
                    r2 = "".join((r2[:-5], "heid"))
            elif (suffix in ("ene", "en") and
                  not word.endswith("heden") and
                  word[-len(suffix) - 1] not in self.__vowels and
                  word[-len(suffix) - 3:-len(suffix)] != "gem"):
                word = word[:-len(suffix)]
                r1 = r1[:-len(suffix)]
                r2 = r2[:-len(suffix)]
                if word.endswith(("kk", "dd", "tt")):
                    word = word[:-1]
                    r1 = r1[:-1]
                    r2 = r2[:-1]
            elif (suffix in ("se", "s") and
                  word[-len(suffix) - 1] not in self.__vowels and
                  word[-len(suffix) - 1] != "j"):
                word = word[:-len(suffix)]
                r1 = r1[:-len(suffix)]
                r2 = r2[:-len(suffix)]
            break
    # STEP 2: delete suffix 'e' after a consonant, undoubling the ending.
    if r1.endswith("e") and word[-2] not in self.__vowels:
        step2_success = True
        word = word[:-1]
        r1 = r1[:-1]
        r2 = r2[:-1]
        if word.endswith(("kk", "dd", "tt")):
            word = word[:-1]
            r1 = r1[:-1]
            r2 = r2[:-1]
    # STEP 3a: remove "heid" (not after 'c'), then possibly a nested "en".
    if r2.endswith("heid") and word[-5] != "c":
        word = word[:-4]
        r1 = r1[:-4]
        r2 = r2[:-4]
        if (r1.endswith("en") and word[-3] not in self.__vowels and
                word[-5:-2] != "gem"):
            word = word[:-2]
            r1 = r1[:-2]
            r2 = r2[:-2]
            if word.endswith(("kk", "dd", "tt")):
                word = word[:-1]
                r1 = r1[:-1]
                r2 = r2[:-1]
    # STEP 3b: Derivational suffixes
    for suffix in self.__step3b_suffixes:
        if r2.endswith(suffix):
            if suffix in ("end", "ing"):
                word = word[:-3]
                r2 = r2[:-3]
                if r2.endswith("ig") and word[-3] != "e":
                    word = word[:-2]
                else:
                    if word.endswith(("kk", "dd", "tt")):
                        word = word[:-1]
            elif suffix == "ig" and word[-3] != "e":
                word = word[:-2]
            elif suffix == "lijk":
                word = word[:-4]
                r1 = r1[:-4]
                if r1.endswith("e") and word[-2] not in self.__vowels:
                    word = word[:-1]
                    if word.endswith(("kk", "dd", "tt")):
                        word = word[:-1]
            elif suffix == "baar":
                word = word[:-4]
            elif suffix == "bar" and step2_success:
                word = word[:-3]
            break
    # STEP 4: Undouble vowel
    if len(word) >= 4:
        if word[-1] not in self.__vowels and word[-1] != "I":
            if word[-3:-1] in ("aa", "ee", "oo", "uu"):
                if word[-4] not in self.__vowels:
                    word = "".join((word[:-3], word[-3], word[-1]))
    # All occurrences of 'I' and 'Y' are put back into lower case.
    word = word.replace("I", "i").replace("Y", "y")
    return word
def Add_text(self):
    """Add measurement data lines to the text window.

    Rebuilds the logger list control from the demag data of the current
    specimen (in the active coordinate system), highlighting rows inside
    the current fit bounds and flagging 'bad' measurements.
    """
    self.selected_meas = []
    # Pick the demag block matching the active coordinate system.
    if self.COORDINATE_SYSTEM == 'geographic':
        zijdblock = self.Data[self.s]['zijdblock_geo']
    elif self.COORDINATE_SYSTEM == 'tilt-corrected':
        zijdblock = self.Data[self.s]['zijdblock_tilt']
    else:
        zijdblock = self.Data[self.s]['zijdblock']
    tmin_index, tmax_index = -1, -1
    if self.current_fit and self.current_fit.tmin and self.current_fit.tmax:
        tmin_index, tmax_index = self.get_indices(self.current_fit)
    TEXT = ""
    self.logger.DeleteAllItems()
    for i in range(len(zijdblock)):
        lab_treatment = self.Data[self.s]['zijdblock_lab_treatments'][i]
        Step = ""
        # Map lab treatment method codes to a short step label for display.
        methods = lab_treatment.split('-')
        if "NO" in methods:
            Step = "N"
        elif "AF" in methods:
            Step = "AF"
        elif "ARM" in methods:
            Step = "ARM"
        elif "IRM" in methods:
            Step = "IRM"
        elif "T" in methods:
            Step = "T"
        elif "LT" in methods:
            Step = "LT"
        Tr = zijdblock[i][0]
        Dec = zijdblock[i][1]
        Inc = zijdblock[i][2]
        Int = zijdblock[i][3]
        csd = self.Data[self.s]['csds'][i]
        self.logger.InsertItem(i, "%i" % i)
        self.logger.SetItem(i, 1, Step)
        self.logger.SetItem(i, 2, "%.1f" % Tr)
        self.logger.SetItem(i, 3, "%.1f" % Dec)
        self.logger.SetItem(i, 4, "%.1f" % Inc)
        self.logger.SetItem(i, 5, "%.2e" % Int)
        self.logger.SetItem(i, 6, csd)
        self.logger.SetItemBackgroundColour(i, "WHITE")
        # Highlight measurements that fall inside the current fit bounds.
        if i >= tmin_index and i <= tmax_index:
            self.logger.SetItemBackgroundColour(i, "LIGHT BLUE")
        # Measurements flagged 'b' (bad) are shown in red (overrides blue).
        if self.Data[self.s]['measurement_flag'][i] == 'b':
            self.logger.SetItemBackgroundColour(i, "red")
def sphere_ball_intersection(R, r):
    """Compute the surface area of the intersection of a sphere of radius R
    centered at (0, 0, 0) with a ball of radius r centered at (R, 0, 0).

    Parameters
    ----------
    R : float, sphere radius
    r : float, ball radius

    Returns
    -------
    area : float, the surface area.
    """
    # x-coordinate of the plane bounding the spherical cap cut by the ball.
    plane_x = (2 * R ** 2 - r ** 2) / (2 * R)
    if plane_x < -R:
        # The ball contains the whole sphere.
        return 4 * np.pi * R ** 2
    # Spherical cap area 2*pi*R*h with cap height h = R - plane_x.
    return 2 * np.pi * R * (R - plane_x)
def native_api_call(self, service, method, data, options, multipart_form=False,
                    multipart_form_data=None, stream=False, http_path="/api/meta/v1/",
                    http_method='POST', get_params=None, connect_timeout_sec=60):
    """Perform a request against the META API, retrying transient failures.

    :type app: metasdk.MetaApp
    :rtype: requests.Response
    """
    if get_params is None:
        get_params = {}
    if 'self' in data:
        # May be absent when called directly from user code rather than
        # through wrapper layers such as DbQueryService.
        data.pop("self")
    if options:
        data.update(options)
    _headers = dict(self.__default_headers)
    if self.auth_user_id:
        _headers['X-META-AuthUserID'] = str(self.auth_user_id)
    request = {
        "url": self.meta_url + http_path + service + "/" + method,
        # (connect timeout, read timeout) in seconds.
        "timeout": (connect_timeout_sec, 1800),
        "stream": stream,
        "params": get_params,
    }
    if multipart_form:
        if multipart_form_data:
            request['files'] = multipart_form_data
        request['data'] = data
        # Let requests set its own multipart boundary content-type header.
        _headers.pop('content-type', None)
    else:
        request['data'] = json.dumps(data)
    request['headers'] = _headers
    # Retry up to 20 times on connection errors or temporary service
    # unavailability, sleeping 15 seconds between attempts.
    for _try_idx in range(20):
        try:
            resp = requests.request(http_method, **request)
            if resp.status_code == 200:
                return resp
            else:
                process_meta_api_error_code(resp.status_code, request, resp.text)
        except (requests.exceptions.ConnectionError, ConnectionError, TimeoutError) as e:
            self.log.warning('META API Connection Error. Sleep...', {"e": e})
            time.sleep(15)
        except Exception as e:
            # The backend signals "service partially or fully unavailable"
            # via this (Russian) message embedded in the exception text.
            if 'Служба частично или полностью недоступна' in str(e):
                self.log.warning('META API Service Temporarily Unavailable. Sleep...', {"e": e})
                time.sleep(15)
            else:
                raise e
    raise ServerError(request)
def load_system_config(config_file=None, work_dir=None, allow_missing=False):
    """Load bcbio_system.yaml configuration file, handling standard defaults.

    Looks for the configuration file in the default location within the final
    base directory from a standard installation. Handles both standard
    installs (galaxy/bcbio_system.yaml) and docker installs
    (config/bcbio_system.yaml).

    Args:
        config_file: path or bare filename of the system YAML file; defaults
            to ``bcbio_system.yaml`` resolved inside the install directory.
        work_dir: working directory, required when a docker config is merged.
        allow_missing: when True, a missing file yields an empty config
            instead of raising.

    Returns:
        Tuple of ``(config dict, resolved config file path or None)``.

    Raises:
        ValueError: when the configuration file cannot be located and
            ``allow_missing`` is False.
    """
    docker_config = _get_docker_config()
    if config_file is None:
        config_file = "bcbio_system.yaml"
    if not os.path.exists(config_file):
        base_dir = get_base_installdir()
        test_config = os.path.join(base_dir, "galaxy", config_file)
        if os.path.exists(test_config):
            config_file = test_config
        elif allow_missing:
            config_file = None
        else:
            raise ValueError("Could not find input system configuration file %s, "
                             "including inside standard directory %s" %
                             (config_file, os.path.join(base_dir, "galaxy")))
    config = load_config(config_file) if config_file else {}
    if docker_config:
        assert work_dir is not None, "Need working directory to merge docker config"
        # BUGFIX: config_file can be None here (allow_missing path); guard the
        # basename call and fall back to the default name so the merged file
        # still gets a sensible location instead of raising TypeError.
        base_name = os.path.basename(config_file) if config_file else "bcbio_system.yaml"
        config_file = os.path.join(work_dir, "%s-merged%s" % os.path.splitext(base_name))
        config = _merge_system_configs(config, docker_config, config_file)
    if "algorithm" not in config:
        config["algorithm"] = {}
    config["bcbio_system"] = config_file
    return config, config_file
def create_tempdir(suffix='', prefix='tmp', directory=None, delete=True):
    """Create a temporary directory and return its path.

    Unless *delete* is False, the new directory is registered with the
    atexit module so it is cleaned up at interpreter shutdown.
    """
    path = tempfile.mkdtemp(suffix=suffix, prefix=prefix, dir=directory)
    if not delete:
        return path
    # Schedule removal at interpreter exit.
    atexit.register(_cleanup_tempdir, path)
    return path
def make_fileitem_peinfo_type(petype, condition='is', negate=False, preserve_case=False):
    """Create a node for FileItem/PEInfo/Type.

    :param petype: PE type string used as the indicator content
    :param condition: match condition for the indicator item
    :param negate: whether to negate the condition
    :param preserve_case: whether string matching is case-sensitive
    :return: An IndicatorItem represented as an Element node
    """
    return ioc_api.make_indicatoritem_node(
        condition,
        'FileItem',
        'FileItem/PEInfo/Type',
        'string',
        petype,
        negate=negate,
        preserve_case=preserve_case,
    )
def get_latent_pred_loss(latents_pred, latents_discrete_hot, hparams):
    """Latent prediction cross-entropy loss.

    Projects the predicted latents to 2**bottleneck_bits logits and scores
    them against the (gradient-stopped) one-hot discrete latents.
    """
    num_classes = 2 ** hparams.bottleneck_bits
    latents_logits = tf.layers.dense(latents_pred, num_classes, name="extra_logits")
    targets = tf.stop_gradient(latents_discrete_hot)
    return tf.nn.softmax_cross_entropy_with_logits_v2(labels=targets, logits=latents_logits)
def parent(self):
    """Select the direct parent of the UI element given by this proxy's query
    expression; see ``QueryCondition`` for details about the selectors.

    Warnings:
        Experimental method, may not be available for all drivers.

    Returns:
        :py:class:`UIObjectProxy <poco.proxy.UIObjectProxy>`: a new UI proxy
        object representing the direct parent of the first UI element.
    """
    # Empty sub-query acting as a placeholder in the '^' (parent) expression.
    placeholder = build_query(None)
    proxy = UIObjectProxy(self.poco)
    proxy.query = ('^', (self.query, placeholder))
    return proxy
def eval_py(self, _globals, _locals):
    """Evaluate the stored Python params script and return the result.

    Args:
        _globals: globals mapping passed to ``eval``.
        _locals: locals mapping passed to ``eval``.

    Returns:
        The params dictionary produced by evaluating ``self.script``.

    Raises:
        Exception: when evaluation fails with a NameError or ResolutionError;
            the original exception is chained as ``__cause__``.
    """
    # NOTE(security): eval executes arbitrary code -- self.script must come
    # from a trusted source; do not feed it untrusted input.
    try:
        params = eval(self.script, _globals, _locals)
    except NameError as e:
        # BUGFIX: chain the original exception so the traceback keeps the cause.
        raise Exception('Failed to evaluate parameters: {}'.format(str(e))) from e
    except ResolutionError as e:
        raise Exception('GetOutput: {}'.format(str(e))) from e
    return params
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.