signature stringlengths 29 44.1k | implementation stringlengths 0 85.2k |
|---|---|
def unpad(cls, data):
    """Unpads data that has been padded.

    The final byte encodes the pad length; that many bytes are removed
    from the end. On Python 3 the result is additionally decoded to str.
    """
    pad_len = ord(data[len(data) - 1:])
    stripped = data[:-pad_len]
    if sys.version_info > (3, 0):
        return stripped.decode()
    return stripped
def qualified_name(self):
    """Get the qualified name of the variant.

    Returns:
        str: Name of the variant with version and index,
        eg "maya-2016.1[1]".
    """
    if self.index is None:
        idxstr = ''
    else:
        idxstr = str(self.index)
    return "%s[%s]" % (self.qualified_package_name, idxstr)
def tile_x_size(self, zoom):
    """Width of a tile in SRID units at zoom level.

    - zoom: zoom level
    """
    warnings.warn(DeprecationWarning("tile_x_size is deprecated"))
    validate_zoom(zoom)
    width = self.x_size / self.matrix_width(zoom)
    return round(width, ROUND)
def body_as_json(self, encoding='UTF-8'):
    """The body of the event loaded as a JSON object, if the data is compatible.

    :param encoding: The encoding to use for decoding message data.
        Default is 'UTF-8'.
    :rtype: dict
    """
    raw = self.body_as_str(encoding=encoding)
    try:
        return json.loads(raw)
    except Exception as err:
        raise TypeError("Event data is not compatible with JSON type: {}".format(err))
def verify_record_permission(permission_factory, record):
    """Check that the current user has the required permissions on record.

    In case the permission check fails, a Flask abort is launched.
    If the user was previously logged-in, a HTTP error 403 is returned.
    Otherwise, a HTTP error 401 is returned.

    :param permission_factory: permission factory used to check permissions.
    :param record: record whose access is limited.
    """
    # NOTE: cannot be collapsed into a single boolean expression because
    # the permission object overloads boolean operations.
    permission = permission_factory(record=record)
    if permission.can():
        return
    from flask_login import current_user
    if not current_user.is_authenticated:
        abort(401)
    abort(403)
def rn(x, af, rate):
    """R(n) ratio for noise identification.

    Ratio of MVAR to AVAR.
    """
    taus = [af * rate]
    (_, adevs, _, _) = at.adev(x, taus=taus, data_type='phase', rate=rate)
    (_, mdevs, _, _) = at.mdev(x, taus=taus, data_type='phase', rate=rate)
    return pow(mdevs[0] / adevs[0], 2)
def split(self, tValues):
    """Split the segment according to the given t values."""
    if self.segmentType == "curve":
        # Cubic curve: hand the four control points to bezierTools.
        p0 = self.previousOnCurve
        p1 = self.points[0].coordinates
        p2 = self.points[1].coordinates
        p3 = self.points[2].coordinates
        return bezierTools.splitCubicAtT(p0, p1, p2, p3, *tValues)
    if self.segmentType == "line":
        # Linear interpolation between the previous on-curve point and
        # this segment's on-curve point.
        x1, y1 = self.previousOnCurve
        x2, y2 = self.points[0].coordinates
        dx = x2 - x1
        dy = y2 - y1
        segments = []
        prev = (x1, y1)
        for t in tValues:
            nxt = (x1 + dx * t, y1 + dy * t)
            segments.append([prev, nxt])
            prev = nxt
        segments.append([prev, (x2, y2)])
        return segments
    if self.segmentType == "qcurve":
        raise NotImplementedError
    raise NotImplementedError
def delete_bank(self, bank):
    """Delete the bank's file.

    :param Bank bank: Bank that will be removed
    """
    Persistence.delete(self._bank_path(bank))
import heapq
def sort_using_heap(numbers):
    """Sorts a list of elements in ascending order using the heap sort algorithm.

    This function uses Python's built-in `heapq` module to sort the list.
    The sorted list is popped off the heap one by one.

    Fix: the original heapified the caller's list in place and then popped
    every element off it, leaving the input list empty. We now heapify a
    copy so the caller's list is untouched.

    Examples:
        >>> sort_using_heap([18, 14, 10, 9, 8, 7, 9, 3, 2, 4, 1])
        [1, 2, 3, 4, 7, 8, 9, 9, 10, 14, 18]
        >>> sort_using_heap([25, 35, 22, 85, 14, 65, 75, 25, 58])
        [14, 22, 25, 25, 35, 58, 65, 75, 85]
        >>> sort_using_heap([1, 3, 5, 7, 9, 2, 4, 6, 8, 0])
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]

    Args:
        numbers (list): List of integers that needs to be sorted.

    Returns:
        list: Sorted list of integers in ascending order.
    """
    heap = list(numbers)  # copy so the input is not mutated/emptied
    heapq.heapify(heap)
    # range(len(heap)) is evaluated once, before any pops shrink the heap.
    return [heapq.heappop(heap) for _ in range(len(heap))]
def OpenPathWithStub(path, stub):
    """Open the specified path using HTTP, using the host/port/protocol
    associated with the specified stub. If the stub has a session cookie,
    it is included with the HTTP request. Returns the response as a
    file-like object.
    """
    from six.moves import http_client
    if not hasattr(stub, 'scheme'):
        raise vmodl.fault.NotSupported()
    if stub.scheme == http_client.HTTPConnection:
        protocol = 'http'
    elif stub.scheme == http_client.HTTPSConnection:
        protocol = 'https'
    else:
        raise vmodl.fault.NotSupported()
    url = '%s://%s%s' % (protocol, stub.host, path)
    headers = {}
    if stub.cookie:
        headers["Cookie"] = stub.cookie
    # verify=False: certificate checking is intentionally disabled here.
    return requests.get(url, headers=headers, verify=False)
def __cutWithOutMethod(self, oiraw, cut_method, text=True):
    '''Segment text: split the raw input into lines, then run the supplied
    single-line segmentation function (``cut_method``, of which there are
    two variants) on each line.

    If ``text`` is True, return the segmented result as one string; if
    False, return a 2-D list so callers can do further processing.
    Several details below exist only to normalise the output format.
    '''
    oiraw = oiraw.split('\n')
    txt = ""
    array = []
    if (text):
        for line in oiraw:
            if (self.__seg_only):
                # Join tokens with single spaces, skipping blank tokens.
                temp_txt = reduce(lambda x, y: x + ' ' + y if y != " " else x, cut_method(line), '') + '\n'
            else:
                # Each item is a sequence; concatenate its parts into one token.
                temp_txt = reduce(lambda x, y: x + ' ' + "".join(y), cut_method(line), '') + '\n'
            # Drop the leading separator added by the reduce seed.
            txt += temp_txt[1:]
        # Drop the trailing newline.
        return txt[:-1]
    else:
        for line in oiraw:
            if (line):
                if (self.__seg_only):
                    # Pair each token with an empty tag.
                    array += (reduce(lambda x, y: x + [[y, '']], cut_method(line), []))
                else:
                    # Keep fields 0 and 2 of each item (token and its tag,
                    # presumably — field 1 not used here).
                    array += (reduce(lambda x, y: x + [[y[0], y[2]]], cut_method(line), []))
            # Line boundary marker.
            array += [['\n', '']]
        # Drop the final boundary marker.
        return array[:-1]
def gf_mult_noLUT(x, y, prim=0, field_charac_full=256, carryless=True):
    '''Galois Field integer multiplication using the Russian Peasant
    Multiplication algorithm (faster than standard multiplication followed
    by modular reduction).

    If prim is 0 and carryless=False, the function produces the result of a
    standard integer multiplication (no carry-less arithmetic nor modular
    reduction).
    '''
    result = 0
    while y:
        # When the low bit of y is set, accumulate the current x. In GF(2)
        # addition is carry-less, i.e. XOR; otherwise plain addition.
        if y & 1:
            result = result ^ x if carryless else result + x
        y >>= 1
        x <<= 1
        # Modular reduction by the primitive polynomial as soon as x
        # overflows the field (reaches field_charac_full).
        if prim > 0 and x & field_charac_full:
            x ^= prim
    return result
def get_updates(self, offset=None, limit=None, timeout=None, allowed_updates=None):
    """Use this method to receive incoming updates using long polling (wiki).
    An Array of Update objects is returned.

    Notes: 1. This method will not work if an outgoing webhook is set up.
    2. In order to avoid getting duplicate updates, recalculate offset after
    each server response.

    https://core.telegram.org/bots/api#getupdates

    Optional keyword parameters:

    :param offset: Identifier of the first update to be returned. Must be
        greater by one than the highest among the identifiers of previously
        received updates. A negative offset retrieves updates starting from
        -offset from the end of the queue.
    :type offset: int
    :param limit: Limits the number of updates to be retrieved. Values
        between 1-100 are accepted. Defaults to 100.
    :type limit: int
    :param timeout: Timeout in seconds for long polling. Defaults to 0,
        i.e. usual short polling.
    :type timeout: int
    :param allowed_updates: List of update types the bot should receive.
        Specify an empty list to receive all updates regardless of type.
    :type allowed_updates: list of str|unicode

    Returns:
    :return: An Array of Update objects is returned
    :rtype: list of pytgbot.api_types.receivable.updates.Update
    """
    assert_type_or_raise(offset, None, int, parameter_name="offset")
    assert_type_or_raise(limit, None, int, parameter_name="limit")
    assert_type_or_raise(timeout, None, int, parameter_name="timeout")
    assert_type_or_raise(allowed_updates, None, list, parameter_name="allowed_updates")
    result = self.do("getUpdates", offset=offset, limit=limit, timeout=timeout, allowed_updates=allowed_updates)
    if not self.return_python_objects:
        return result
    logger.debug("Trying to parse {data}".format(data=repr(result)))
    from pytgbot.api_types.receivable.updates import Update
    try:
        return Update.from_array_list(result, list_level=1)
    except TgApiParseException:
        logger.debug("Failed parsing as api_type Update", exc_info=True)
    # No valid parsing so far; see debug log for details.
    raise TgApiParseException("Could not parse result.")
def file_md5sum(filename):
    '''Calculate and return an MD5 checksum for the specified file. Any file
    errors (non-existent file, read error, etc.) are not handled here but
    should be caught where this method is called.

    :param filename: full path to the file for which a checksum should be calculated
    :returns: hex-digest formatted MD5 checksum as a string
    '''
    # duplicated from keep.common.utils
    # possibly at some point this should be moved to a common codebase/library
    digest = hashlib.md5()
    chunk_size = 128 * digest.block_size
    with open(filename, 'rb') as fobj:
        # Read in block-sized chunks so large files are never fully in memory.
        while True:
            chunk = fobj.read(chunk_size)
            if not chunk:
                break
            digest.update(chunk)
    return digest.hexdigest()
def get_context_data(self, **kwargs):
    '''Adds a 'base_template' attribute to context for the page_detail to
    extend from.'''
    context = super(PageDetail, self).get_context_data(**kwargs)
    base_template = "nupages/base.html"
    # When MultiTenantMiddleware is in use, prefer a base template specific
    # to the tenant's SITE_ID, falling back to the default one.
    if hasattr(self.request, 'site_id'):
        tenant_template = "nupages/tenants/{}/base.html".format(self.request.site_id)
        base_template = select_template([tenant_template, base_template])
    context['base_template'] = base_template
    return context
def create_entry_line_from_text(self, text):
    """Try to parse the given text line and extract an entry. Return an
    :class:`~taxi.timesheet.lines.Entry` object if parsing is successful,
    otherwise raise :exc:`~taxi.exceptions.ParseError`.
    """
    split_line = re.match(self.entry_line_regexp, text)
    if not split_line:
        raise ParseError("Line must have an alias, a duration and a description")
    alias = split_line.group('alias')
    start_time = end_time = None
    # 'start_time' group is None when the time part was given as a plain
    # duration; an empty string presumably marks an implicit start
    # (continuation of the previous entry) — left as None either way.
    if split_line.group('start_time') is not None:
        if split_line.group('start_time'):
            try:
                start_time = create_time_from_text(split_line.group('start_time'))
            except ValueError:
                raise ParseError("Start time is not a valid time, it must be in format hh:mm or hhmm")
        else:
            start_time = None
    if split_line.group('end_time') is not None:
        # A literal '?' as the end time marks an open-ended entry.
        if split_line.group('end_time') == '?':
            end_time = None
        else:
            try:
                end_time = create_time_from_text(split_line.group('end_time'))
            except ValueError:
                raise ParseError("End time is not a valid time, it must be in format hh:mm or hhmm")
    # Duration is either a plain float, or a (start, end) tuple when a time
    # range (possibly partial) was given.
    if split_line.group('duration') is not None:
        duration = float(split_line.group('duration'))
    elif start_time or end_time:
        duration = (start_time, end_time)
    else:
        duration = (None, None)
    description = split_line.group('description')
    # Parse and set line flags
    if split_line.group('flags'):
        try:
            flags = self.extract_flags_from_text(split_line.group('flags'))
        # extract_flags_from_text will raise `KeyError` if one of the flags is not recognized. This should never
        # happen though as the list of accepted flags is bundled in self.entry_line_regexp
        except KeyError as e:
            raise ParseError(*e.args)
    else:
        flags = set()
    # Backwards compatibility with previous notation that allowed to end the alias with a `?` to ignore it
    if alias.endswith('?'):
        flags.add(Entry.FLAG_IGNORED)
        alias = alias[:-1]
    if description == '?':
        flags.add(Entry.FLAG_IGNORED)
    # Keep the raw line parts (flags, spacing, alias, time, description) so
    # the entry can be re-serialized without losing its formatting.
    line = (split_line.group('flags') or '', split_line.group('spacing1') or '',
            split_line.group('alias'), split_line.group('spacing2'),
            split_line.group('time'), split_line.group('spacing3'),
            split_line.group('description'),)
    entry_line = Entry(alias, duration, description, flags=flags, text=line)
    return entry_line
def set_or_delete(dictionary, key, value):
    """Set value as value of dict key key. If value is None, delete key key from dict.

    :param dictionary: Dictionary to work on.
    :param key: Key to set or delete. If deleting and key does not exist in dict, nothing is done.
    :param value: Value to set. If value is None, delete key.
    :return: Nothing, modifies dict in place.
    """
    # Fix: the previous truthiness test (`if value:`) deleted keys for any
    # falsy value (0, "", []), and the delete branch checked the stored
    # value's truthiness, so existing falsy entries could never be removed.
    # The documented contract is: None deletes, anything else sets.
    if value is not None:
        dictionary[key] = value
    else:
        dictionary.pop(key, None)
def detect(byte_str):
    """Detect the encoding of the given byte string.

    :param byte_str: The byte sequence to examine.
    :type byte_str: ``bytes`` or ``bytearray``
    """
    if not isinstance(byte_str, bytearray):
        if not isinstance(byte_str, bytes):
            raise TypeError('Expected object of type bytes or bytearray, got: '
                            '{0}'.format(type(byte_str)))
        # Normalise bytes to bytearray for the detector.
        byte_str = bytearray(byte_str)
    detector = UniversalDetector()
    detector.feed(byte_str)
    return detector.close()
def Dir(self, name, directory=None, create=True):
    """Look up or create a Dir node with the specified name. If
    the name is a relative path (begins with ./, ../, or a file name),
    then it is looked up relative to the supplied directory node,
    or to the top level directory of the FS (supplied at construction
    time) if no directory is supplied.

    This method will raise TypeError if a normal file is found at the
    specified path.
    """
    # Delegate to the generic lookup, passing the Dir node class as the
    # type to find or create.
    return self._lookup(name, directory, Dir, create)
def _sample_action(self, constraints: Dict[str, Constraints], default: Sequence[tf.Tensor], prob: float = 0.3) -> Sequence[tf.Tensor]:
    '''Samples action fluents respecting the given bound `constraints`.

    With probability `prob` it chooses the action fluent default value;
    with probability 1-`prob` it samples the fluent w.r.t. its bounds.

    Args:
        constraints (Dict[str, Tuple[Optional[TensorFluent], Optional[TensorFluent]]]): The bounds for each action fluent.
        default (Sequence[tf.Tensor]): The default action fluents.
        prob (float): A probability measure.

    Returns:
        Sequence[tf.Tensor]: A tuple of action fluents.
    '''
    rddl = self.compiler.rddl
    ordering = rddl.domain.action_fluent_ordering
    dtypes = map(rddl2tf.utils.range_type_to_dtype, rddl.action_range_type)
    sizes = rddl.action_size
    fluents = []
    for name, dtype, size, default_value in zip(ordering, dtypes, sizes, default):
        fluents.append(self._sample_action_fluent(name, dtype, size, constraints, default_value, prob))
    return tuple(fluents)
def scan_interface(self, address):
    """Scan interface for Crazyflies"""
    # Refuse to scan while a link is already open.
    if self.cfusb is not None:
        raise Exception('Cannot scan for links while the link is open!')
    try:
        self.cfusb = CfUsb()
    except Exception as e:
        logger.warn('Exception while scanning for Crazyflie USB: {}'.format(str(e)))
        return []
    # FIXME: implement serial number in the Crazyradio driver!
    # serial = "N/A"
    found = self.cfusb.scan()
    self.cfusb.close()
    self.cfusb = None
    return found
def corrupt_input(data, sess, corrtype, corrfrac):
    """Corrupt a fraction of data according to the chosen noise method.

    :param data: 2-D array of samples (corruption ratio is computed from
        data.shape[1], the per-sample feature count)
    :param sess: session object forwarded to the masking-noise routine
    :param corrtype: 'none', 'masking' or 'salt_and_pepper'
    :param corrfrac: fraction of features to corrupt
    :return: corrupted data
    """
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin int is the documented replacement.
    corruption_ratio = np.round(corrfrac * data.shape[1]).astype(int)
    if corrtype == 'none':
        return np.copy(data)
    if corrfrac > 0.0:
        if corrtype == 'masking':
            return masking_noise(data, sess, corrfrac)
        elif corrtype == 'salt_and_pepper':
            return salt_and_pepper_noise(data, corruption_ratio)
    else:
        return np.copy(data)
def status(self):
    """The current status of the event (started, finished or pending)."""
    myNow = timezone.localtime(timezone=self.tz)
    if getAwareDatetime(self.date, self.time_to, self.tz) < myNow:
        return "finished"
    elif getAwareDatetime(self.date, self.time_from, self.tz) < myNow:
        return "started"
    # Fix: previously fell through and returned None for events that have
    # not started yet, contradicting the documented contract.
    return "pending"
def p_always(self, p):
    'always : ALWAYS senslist always_statement'
    # Build an Always AST node from the sensitivity list (p[2]) and the
    # statement (p[3]), tagged with the line number of the ALWAYS token.
    p[0] = Always(p[2], p[3], lineno=p.lineno(1))
    p.set_lineno(0, p.lineno(1))
def add_picture(self, image_path_or_stream, width=None, height=None):
    """Return an |InlineShape| instance containing the image identified by
    *image_path_or_stream*, added to the end of this run.

    *image_path_or_stream* can be a path (a string) or a file-like object
    containing a binary image. If neither width nor height is specified,
    the picture appears at its native size. If only one is specified, it
    is used to compute a scaling factor that is then applied to the
    unspecified dimension, preserving the aspect ratio of the image. The
    native size of the picture is calculated using the dots-per-inch
    (dpi) value specified in the image file, defaulting to 72 dpi if no
    value is specified, as is often the case.
    """
    # Build the inline drawing element, attach it to this run's XML, and
    # wrap it in the InlineShape proxy returned to the caller.
    inline = self.part.new_pic_inline(image_path_or_stream, width, height)
    self._r.add_drawing(inline)
    return InlineShape(inline)
def get_boxed_structure(self, a, b, c, images=(1, 1, 1), random_rotation=False, min_dist=1, cls=None, offset=None, no_cross=False):
    """Creates a Structure from a Molecule by putting the Molecule in the
    center of an orthorhombic box. Useful for creating Structure for
    calculating molecules using periodic codes.

    Args:
        a (float): a-lattice parameter.
        b (float): b-lattice parameter.
        c (float): c-lattice parameter.
        images: No. of boxed images in each direction. Defaults to
            (1, 1, 1), meaning single molecule with 1 lattice parameter
            in each direction.
        random_rotation (bool): Whether to apply a random rotation to
            each molecule. This jumbles all the molecules so that they
            are not exact images of each other.
        min_dist (float): The minimum distance that atoms should be from
            each other. This is only used if random_rotation is True.
            The randomized rotations are searched such that no two atoms
            are less than min_dist from each other.
        cls: The Structure class to instantiate (defaults to pymatgen
            Structure).
        offset: Translation to offset molecule from center of mass coords.
        no_cross: Whether to forbid molecule coords from extending beyond
            boundary of box.

    Returns:
        Structure containing molecule in a box.
    """
    if offset is None:
        offset = np.array([0, 0, 0])
    # Check the molecule's extents fit inside a single box.
    coords = np.array(self.cart_coords)
    x_range = max(coords[:, 0]) - min(coords[:, 0])
    y_range = max(coords[:, 1]) - min(coords[:, 1])
    z_range = max(coords[:, 2]) - min(coords[:, 2])
    if a <= x_range or b <= y_range or c <= z_range:
        raise ValueError("Box is not big enough to contain Molecule.")
    # Orthorhombic supercell lattice covering all requested images.
    lattice = Lattice.from_parameters(a * images[0], b * images[1], c * images[2], 90, 90, 90)
    nimages = images[0] * images[1] * images[2]
    coords = []
    # Center the molecule on its center of mass, then apply the offset.
    centered_coords = self.cart_coords - self.center_of_mass + offset
    for i, j, k in itertools.product(list(range(images[0])), list(range(images[1])), list(range(images[2]))):
        box_center = [(i + 0.5) * a, (j + 0.5) * b, (k + 0.5) * c]
        if random_rotation:
            # Retry random rotations until the placement keeps every atom
            # at least min_dist away from all previously placed images.
            while True:
                op = SymmOp.from_origin_axis_angle((0, 0, 0), axis=np.random.rand(3), angle=random.uniform(-180, 180))
                m = op.rotation_matrix
                new_coords = np.dot(m, centered_coords.T).T + box_center
                if no_cross:
                    x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
                    y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
                    z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
                    if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
                        raise ValueError("Molecule crosses boundary of box.")
                if len(coords) == 0:
                    # First image: nothing to clash with.
                    break
                distances = lattice.get_all_distances(lattice.get_fractional_coords(new_coords), lattice.get_fractional_coords(coords))
                if np.amin(distances) > min_dist:
                    break
        else:
            new_coords = centered_coords + box_center
            if no_cross:
                x_max, x_min = max(new_coords[:, 0]), min(new_coords[:, 0])
                y_max, y_min = max(new_coords[:, 1]), min(new_coords[:, 1])
                z_max, z_min = max(new_coords[:, 2]), min(new_coords[:, 2])
                if x_max > a or x_min < 0 or y_max > b or y_min < 0 or z_max > c or z_min < 0:
                    raise ValueError("Molecule crosses boundary of box.")
        coords.extend(new_coords)
    # Replicate per-site properties for every image.
    sprops = {k: v * nimages for k, v in self.site_properties.items()}
    if cls is None:
        cls = Structure
    return cls(lattice, self.species * nimages, coords, coords_are_cartesian=True, site_properties=sprops).get_sorted_structure()
def do_block(parser, token):
    """Process several nodes inside a single block.

    Block functions take ``context``, ``nodelist`` as first arguments.
    If the second to last argument is ``as``, the rendered result is stored
    in the context and is named whatever the last argument is.

    Syntax::

        {% [block] [var args...] [name=value kwargs...] [as varname] %}
        ... nodelist ...
        {% end[block] %}

    Examples::

        {% render_block as rendered_output %}
            {{ request.path }}/blog/{{ blog.slug }}
        {% endrender_block %}

        {% highlight_block python %}
            import this
        {% endhighlight_block %}
    """
    name, args, kwargs = get_signature(token, contextable=True)
    # Consume everything up to the matching {% end<name> %} tag as this
    # block's nodelist, then discard the end tag itself.
    kwargs['nodelist'] = parser.parse(('end%s' % name,))
    parser.delete_first_token()
    return BlockNode(parser, name, *args, **kwargs)
def html(body, status=200, headers=None):
    '''Returns response object with body in html format.

    :param body: Response data to be encoded.
    :param status: Response code.
    :param headers: Custom Headers.
    '''
    # Fixed content type: HTML with UTF-8 charset.
    return HTTPResponse(body, status=status, headers=headers, content_type='text/html; charset=utf-8')
def splitpath(self, path):
    """Mimic os.path.split using the specified path_separator.

    Mimics os.path.split using the path_separator that was specified
    for this FakeFilesystem.

    Args:
        path: (str) The path to split.

    Returns:
        (str) A duple (pathname, basename) for which pathname does not
        end with a slash, and basename does not contain a slash.
    """
    path = self.normcase(path)
    sep = self._path_separator(path)
    path_components = path.split(sep)
    if not path_components:
        return ('', '')
    starts_with_drive = self._starts_with_drive_letter(path)
    # Last component is the candidate basename.
    basename = path_components.pop()
    colon = self._matching_string(path, ':')
    if not path_components:
        # Single component: either a drive-relative name ("c:name") or a
        # bare file name with no directory part.
        if starts_with_drive:
            components = basename.split(colon)
            return (components[0] + colon, components[1])
        return ('', basename)
    for component in path_components:
        if component:
            # The path is not the root; it contains a non-separator
            # component. Strip all trailing separators.
            while not path_components[-1]:
                path_components.pop()
            if starts_with_drive:
                if not path_components:
                    # Only the drive spec remains before the basename.
                    components = basename.split(colon)
                    return (components[0] + colon, components[1])
                if (len(path_components) == 1 and path_components[0].endswith(colon)):
                    # Drive root (e.g. "c:\name"): keep the separator.
                    return (path_components[0] + sep, basename)
            return (sep.join(path_components), basename)
    # Root path. Collapse all leading separators.
    return (sep, basename)
def call_many(self, callback, args):
    """callback is run with each arg but run a call per second"""
    # NOTE(review): despite the docstring, no pacing/sleep happens here;
    # presumably the callback itself schedules the work — confirm.
    if isinstance(callback, str):
        # Resolve a method name on self to the bound method.
        callback = getattr(self, callback)
    result = None
    for arg in args:
        result = callback(*arg)
    # Only the last call's return value is reported.
    return result
def raises_gathered(error_type):
    '''For use in tests. Many tests expect a single error to be thrown, and
    want it to be of a specific type. This is a helper method for when that
    type is inside a gathered exception.

    Yields a container whose ``exception`` attribute is set to the matched
    inner exception on success; any non-matching case re-raises.
    '''
    container = RaisesGatheredContainer()
    try:
        yield container
    except GatheredExceptions as e:
        # Make sure there is exactly one exception.
        if len(e.exceptions) != 1:
            raise
        inner = e.exceptions[0]
        # Make sure the exception is the right type.
        if not isinstance(inner, error_type):
            raise
        # Success: expose the matched exception to the caller.
        container.exception = inner
def plot_title(ax, pretitle='', title='Figure', posttitle='', title_fontsize=14, title_arg=None):
    """Set title options of a matplotlib plot.

    Args:
        ax: matplotlib axes
        pretitle (str): String to include before the general title of the figure
        posttitle (str): String to include after the general title of the figure
        title (str): Set the title for the figure
        title_fontsize (int): Defines the size of the title's font
        title_arg (dict): Additional arguments for the matplotlib title() call
    """
    # Keep an already-set title; otherwise compose one from the parts.
    final_title = ax.get_title()
    if not final_title:
        final_title = pretitle + title + posttitle
    ax.set_title(final_title, fontsize=title_fontsize, **dict_if_none(title_arg))
def format_node(import_graph, node, indent):
    """Helper function for print_tree"""
    if not isinstance(node, graph.NodeSet):
        return format_file_node(import_graph, node, indent)
    # A NodeSet represents an import cycle: render its members one level
    # deeper inside a "cycle { ... }" wrapper.
    pad = ' ' * indent
    lines = [pad + 'cycle {']
    lines.extend(format_file_node(import_graph, n, indent + 1) for n in node.nodes)
    lines.append(pad + '}')
    return '\n'.join(lines)
def Exponential(rate: vertex_constructor_param_types, label: Optional[str] = None) -> Vertex:
    """One to one constructor for mapping some shape of rate to a matching
    shaped exponential.

    :param rate: the rate of the Exponential with either the same shape as
        specified for this vertex or scalar
    :param label: optional label for the vertex
    """
    # Wrap the JVM-side ExponentialVertex, coercing rate to a double vertex.
    return Double(context.jvm_view().ExponentialVertex, label, cast_to_double_vertex(rate))
def set_status(self, status: Status, increment_try_count: bool = True, filename: str = None):
    '''Mark the item with the given status.

    Args:
        status: a value from :class:`Status`.
        increment_try_count: if True, increment the ``try_count`` value
        filename: optional filename recorded on the URL result
    '''
    url = self.url_record.url
    # Guard against marking the same item twice.
    assert not self._try_count_incremented, (url, status)
    if increment_try_count:
        self._try_count_incremented = True
    _logger.debug(__('Marking URL {0} status {1}.', url, status))
    url_result = URLResult()
    url_result.filename = filename
    # Persist the status change in the URL table.
    self.app_session.factory['URLTable'].check_in(url, status, increment_try_count=increment_try_count, url_result=url_result, )
    self._processed = True
def run_cutadapt(job, fastqs, univ_options, cutadapt_options):
    """Runs cutadapt on the input RNA fastq files.

    :param list fastqs: List of fsIDs for an input RNA-Seq fastq pair
    :param dict univ_options: Dict of universal options used by almost all tools
    :param dict cutadapt_options: Options specific to cutadapt
    :return: List of fsIDs of cutadapted fastqs
    :rtype: list[toil.fileStore.FileID]
    """
    work_dir = os.getcwd()
    input_files = {'rna_1.fastq': fastqs[0], 'rna_2.fastq': fastqs[1]}
    input_files = get_files_from_filestore(job, input_files, work_dir, docker=False)
    # Handle gzipped file
    gz = '.gz' if is_gzipfile(input_files['rna_1.fastq']) else ''
    if gz:
        for read_file in 'rna_1.fastq', 'rna_2.fastq':
            # NOTE(review): the symlink target is the bare file name, so this
            # relies on work_dir being the current directory — confirm.
            os.symlink(read_file, read_file + gz)
            input_files[read_file + gz] = input_files[read_file] + gz
    # Re-map local paths to their in-container (docker) paths.
    input_files = {key: docker_path(path) for key, path in input_files.items()}
    parameters = ['-a', cutadapt_options['a'],  # Fwd read 3' adapter
                  '-A', cutadapt_options['A'],  # Rev read 3' adapter
                  '-m', '35',  # Minimum size of read
                  '-o', docker_path('rna_cutadapt_1.fastq.gz'),  # Output for R1
                  '-p', docker_path('rna_cutadapt_2.fastq.gz'),  # Output for R2
                  input_files['rna_1.fastq' + gz], input_files['rna_2.fastq' + gz]]
    docker_call(tool='cutadapt', tool_parameters=parameters, work_dir=work_dir, dockerhub=univ_options['dockerhub'], tool_version=cutadapt_options['version'])
    # Export the trimmed fastqs back into the Toil file store.
    output_files = []
    for fastq_file in ['rna_cutadapt_1.fastq.gz', 'rna_cutadapt_2.fastq.gz']:
        output_files.append(job.fileStore.writeGlobalFile('/'.join([work_dir, fastq_file])))
    job.fileStore.logToMaster('Ran cutadapt on %s successfully' % univ_options['patient'])
    return output_files
def _process_score(self, model_name, dependency_cache=None):
    """Generates a score for a given model using the `dependency_cache`."""
    version = self[model_name].version
    # Time feature extraction and scoring separately for debug logging.
    started = time.time()
    feature_values = self._solve_features(model_name, dependency_cache)
    elapsed = round(time.time() - started, 3)
    logger.debug("Extracted features for {0}:{1}:{2} in {3} secs".format(self.name, model_name, version, elapsed))
    started = time.time()
    score = self[model_name].score(feature_values)
    elapsed = round(time.time() - started, 3)
    logger.debug("Scored features for {0}:{1}:{2} in {3} secs".format(self.name, model_name, version, elapsed))
    return score
def delete_topic(self, topic):
    """Delete a topic.

    :param topic: name of the topic to delete; validated before the request.
    """
    nsq.assert_valid_topic_name(topic)
    return self._request('POST', '/topic/delete', fields={'topic': topic})
def fwd_chunk(self):
    """Returns the chunk following this chunk in the list of free chunks.

    Abstract: subclasses must override. The error message names both the
    missing method and the concrete class for easier debugging.
    """
    raise NotImplementedError("%s not implemented for %s" % (self.fwd_chunk.__func__.__name__, self.__class__.__name__))
def transform_error_coorb_to_inertial(vec_coorb, vec_err_coorb, orbPhase, quat_copr):
    """Transform error in a vector from the coorbital frame to the inertial
    frame. Generates distributions in the coorbital frame, transforms them
    to the inertial frame and returns 1-sigma widths in the inertial frame.
    """
    # Fixed seed for reproducibility.
    np.random.seed(0)
    # Sample a normal distribution per component in the coorbital frame.
    samples = [np.random.normal(mean, sigma, 1000)
               for mean, sigma in zip(vec_coorb, vec_err_coorb)]
    dist_coorb = np.array(samples).T
    # Coorbital -> coprecessing frame.
    dist_copr = rotate_in_plane(dist_coorb, -orbPhase)
    # Coprecessing -> inertial frame.
    quats = np.array([quat_copr for _ in dist_copr]).T
    dist_inertial = transformTimeDependentVector(quats, dist_copr.T).T
    # 1-sigma width of each component in the inertial frame.
    return np.std(dist_inertial, axis=0)
def find_highest_set_bit(value):
    """Find and return the value of the highest set bit in `value`.

    For example, the input 6 (binary '110') will output 4 (binary '100').

    Args:
        value (int): The integer value which we want to find the highest set bit for.
            Assumed non-negative; for value <= 0 the result is degenerate
            (0 for 0, 1 for negatives), matching the original behavior.

    Returns:
        int: The value of the highest set bit.

    Examples:
        >>> find_highest_set_bit(6)
        4
        >>> find_highest_set_bit(10)
        8
        >>> find_highest_set_bit(18)
        16
    """
    if value == 0:
        return 0
    msb = 0
    # Use integer right shifts: the previous `int(value / 2)` went through
    # float division, which raises OverflowError for ints >= 2**1024 and can
    # lose precision for very large values.
    value >>= 1
    while value > 0:
        value >>= 1
        msb += 1
    # Left shift 1 by the number of halvings: the highest set bit's value.
    return 1 << msb
def newDocNode(self, ns, name, content):
    """Creation of a new node element within a document.

    @ns and @content are optional (None). NOTE: @content is supposed to
    be a piece of XML CDATA, so it allows entity references, but XML
    special chars need to be escaped first by using
    xmlEncodeEntitiesReentrant(). Use xmlNewDocRawNode() if you don't
    need entities support.
    """
    # Unwrap the namespace object, if one was given
    ns__o = None if ns is None else ns._o
    ret = libxml2mod.xmlNewDocNode(self._o, ns__o, name, content)
    if ret is None:
        raise treeError('xmlNewDocNode() failed')
    return xmlNode(_obj=ret)
def cache(self, bank, key, fun, loop_fun=None, **kwargs):
    '''
    Check cache for the data. If it is there, check to see if it needs to
    be refreshed.

    If the data is not there, or it needs to be refreshed, then call the
    callback function (``fun``) with any given ``**kwargs``.

    In some cases, the callback function returns a list of objects which
    need to be processed by a second function. If that is the case, then
    the second function is passed in as ``loop_fun``. Each item in the
    return list from the first function will be the only argument for the
    second function.
    '''
    expire_seconds = kwargs.get('expire', 86400)  # 1 day
    updated = self.updated(bank, key)
    update_cache = False
    if updated is None:
        # no timestamp recorded: entry has never been cached
        update_cache = True
    else:
        if int(time.time()) - updated > expire_seconds:
            # entry is older than the expiry window
            update_cache = True
    data = self.fetch(bank, key)
    if not data or update_cache is True:
        # (re)build the cached value via the callback(s) and store it
        if loop_fun is not None:
            data = []
            items = fun(**kwargs)
            for item in items:
                data.append(loop_fun(item))
        else:
            data = fun(**kwargs)
        self.store(bank, key, data)
    return data
def identify_kmers(self, residues):
    """Using the covalent linkage information, find out which fragments/subunits form a ligand.

    :param residues: mapping from (name, chain, position) tuples to residue objects.
    :returns: list of residue lists, one list per (possibly multi-residue) ligand.
    """
    # Remove all those not considered by ligands and pairings including alternate conformations:
    # keep only covalent links whose both partners are kept ligand names, are in
    # conformation 'A' or unlabeled, and are present in `residues`.
    ligdoubles = [[(link.id1, link.chain1, link.pos1),
                   (link.id2, link.chain2, link.pos2)]
                  for link in [c for c in self.covalent
                               if c.id1 in self.lignames_kept
                               and c.id2 in self.lignames_kept
                               and c.conf1 in ['A', '']
                               and c.conf2 in ['A', '']
                               and (c.id1, c.chain1, c.pos1) in residues
                               and (c.id2, c.chain2, c.pos2) in residues]]
    kmers = cluster_doubles(ligdoubles)
    if not kmers:
        # No ligand kmers, just normal independent ligands
        return [[residues[res]] for res in residues]
    else:
        # res_kmers contains clusters of covalently bound ligand residues (kmer ligands)
        res_kmers = [[residues[res] for res in kmer] for kmer in kmers]
        # In this case, add other ligands which are not part of a kmer
        in_kmer = []
        for res_kmer in res_kmers:
            for res in res_kmer:
                in_kmer.append((res.GetName(), res.GetChain(), res.GetNum()))
        for res in residues:
            if res not in in_kmer:
                newres = [residues[res], ]
                res_kmers.append(newres)
        return res_kmers
def make_connection(config=None, default_model=None,
                    _api_version=datastore_rpc._DATASTORE_V3,
                    _id_resolver=None):
    """Create a new Connection object with the right adapter.

    Optionally you can pass in a datastore_rpc.Configuration object.
    """
    adapter = ModelAdapter(default_model, id_resolver=_id_resolver)
    return datastore_rpc.Connection(adapter=adapter,
                                    config=config,
                                    _api_version=_api_version)
def plot(self, title='TimeMoc', view=(None, None)):
    """Plot the TimeMoc in a time window.

    This method uses interactive matplotlib. The user can move its mouse through
    the plot to see the time (at the mouse position).

    Parameters
    ----------
    title : str, optional
        The title of the plot. Set to 'TimeMoc' by default.
    view : (`~astropy.time.Time`, `~astropy.time.Time`), optional
        Define the view window in which the observations are plotted. Set to
        (None, None) by default (i.e. all the observation time window is rendered).
    """
    from matplotlib.colors import LinearSegmentedColormap
    import matplotlib.pyplot as plt
    if self._interval_set.empty():
        print('Nothing to print. This TimeMoc object is empty.')
        return
    # Cap resolution: degrade deep MOCs to a coarser order before plotting
    plot_order = 15
    if self.max_order > plot_order:
        plotted_moc = self.degrade_to_order(plot_order)
    else:
        plotted_moc = self
    # View bounds in Julian days; default to the MOC's own time extent
    min_jd = plotted_moc.min_time.jd if not view[0] else view[0].jd
    max_jd = plotted_moc.max_time.jd if not view[1] else view[1].jd
    if max_jd < min_jd:
        raise ValueError("Invalid selection: max_jd = {0} must be > to min_jd = {1}".format(max_jd, min_jd))
    fig1 = plt.figure(figsize=(9.5, 5))
    ax = fig1.add_subplot(111)
    ax.set_xlabel('iso')
    ax.get_yaxis().set_visible(False)
    # Rasterize the interval set onto a fixed-size 1D grid of `size` bins
    size = 2000
    delta = (max_jd - min_jd) / size
    min_jd_time = min_jd
    ax.set_xticks([0, size])
    ax.set_xticklabels(Time([min_jd_time, max_jd], format='jd', scale='tdb').iso, rotation=70)
    y = np.zeros(size)
    for (s_time_us, e_time_us) in plotted_moc._interval_set._intervals:
        # intervals are stored in microseconds; convert to days, then to bin indices
        s_index = int((s_time_us / TimeMOC.DAY_MICRO_SEC - min_jd_time) / delta)
        e_index = int((e_time_us / TimeMOC.DAY_MICRO_SEC - min_jd_time) / delta)
        y[s_index:(e_index + 1)] = 1.0
    # hack in case of full time mocs.
    if np.all(y):
        y[0] = 0
    # Stretch the 1D strip into a 2D image so imshow renders a visible band
    z = np.tile(y, (int(size // 10), 1))
    plt.title(title)
    color_map = LinearSegmentedColormap.from_list('w2r', ['#fffff0', '#aa0000'])
    color_map.set_under('w')
    color_map.set_bad('gray')
    plt.imshow(z, interpolation='bilinear', cmap=color_map)
    def on_mouse_motion(event):
        # Hide the previous annotation, then show the time under the cursor
        for txt in ax.texts:
            txt.set_visible(False)
        text = ax.text(0, 0, "", va="bottom", ha="left")
        time = Time(event.xdata * delta + min_jd_time, format='jd', scale='tdb')
        tx = '{0}'.format(time.iso)
        text.set_position((event.xdata - 50, 700))
        text.set_rotation(70)
        text.set_text(tx)
    cid = fig1.canvas.mpl_connect('motion_notify_event', on_mouse_motion)
    plt.show()
def get_data(self, data_key, key=''):
    """Get the data from the cache.

    :param data_key: a key for accessing the data;
    :param key: if provided (e.g. non-empty string), will be used to
        decrypt the data as a password;
    :returns: tuple ``(data, flag)`` — the data extracted from the cache
        (a python object, or None) and whether extraction succeeded.
    """
    flag = False  # set to True if data was successfully extracted.
    extracted = self.get(data_key, -1)  # -1 is the "missing" sentinel
    if extracted != -1:
        try:
            data, expired, noc, ncalls = self._from_bytes(extracted, key=key)
            flag = True
        except ValueError:
            # wrong decryption key or corrupted payload
            return None, flag
        if noc:
            # entry has a bounded number-of-calls: bump the counter and re-store
            ncalls += 1
            self[data_key] = self._to_bytes(data, expired=expired, key=key, noc=noc, ncalls=ncalls)
            if ncalls >= noc:
                # call budget exhausted: evict and report failure
                self.remove(data_key)
                flag = False
        if expired and datetime.datetime.now() > expired:
            # entry past its expiry time: evict and report failure
            self.remove(data_key)
            flag = False
    return (data, flag) if flag else (None, flag)
def resume(self):
    """Resume all paused VMs in this cluster.

    Refreshes node IP addresses, persists the cluster state, and then asks
    the setup provider to finish resuming. Warnings (not exceptions) are
    emitted when any step cannot be fully verified.
    """
    log.info("Resuming cluster `%s` ...", self.name)
    failed = self._resume_all_nodes()
    # IPs may have changed across the pause; refresh them before probing
    for node in self.get_all_nodes():
        node.update_ips()
    self._gather_node_ip_addresses(self.get_all_nodes(), self.start_timeout, self.ssh_probe_timeout)
    # persist refreshed state before any early return below
    self.repository.save_or_update(self)
    if failed:
        log.warning("Not all cluster nodes have been successfully " "restarted. Check error messages above and consider " "re-running `elasticluster resume %s` if " "necessary.", self.name)
        return
    if not self._setup_provider.resume_cluster(self):
        log.warning("Elasticluster was not able to guarantee that the " "cluster restarted correctly - check the errors " "above and check your config.")
def register ( self , app , * args , ** kwargs ) :
"Activate loginmanager and principal ." | if not self . _login_manager or self . app != app :
self . _login_manager = LoginManager ( )
self . _login_manager . user_callback = self . user_loader
self . _login_manager . setup_app ( app )
self . _login_manager . login_view = 'urls.index'
self . _login_manager . login_message = u'You need to be signed in for this page.'
self . app = app
if not self . _principal :
self . _principal = Principal ( app )
identity_loaded . connect ( self . identity_loaded )
super ( UserManager , self ) . register ( app , * args , ** kwargs ) |
def delete_external_event(self, calendar_id, event_uid):
    """Delete an external event from the specified calendar.

    :param string calendar_id: ID of calendar to delete from.
    :param string event_uid: ID of event to delete.
    """
    endpoint = 'calendars/%s/events' % calendar_id
    payload = {'event_uid': event_uid}
    self.request_handler.delete(endpoint=endpoint, data=payload)
def getreferingobjs(self, iddgroups=None, fields=None):
    """Get a list of objects that refer to this object

    Delegates to what is presumably the module-level ``getreferingobjs``
    helper (inside a method body the bare name resolves at module scope),
    passing this object as the target — TODO confirm against the module.
    """
    return getreferingobjs(self, iddgroups=iddgroups, fields=fields)
def team_id(self, team):
    '''Get team ID using a real team name

    Scrapes the site's front page and matches `team` against the two team
    links in each betting-table row; the id is taken from the link's
    ``cid=`` query parameter.

    @return: id string, or None if the team name is not found
    '''
    # UTF-8 comparison
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain", 'Referer': 'http://' + self.domain + '/', "User-Agent": user_agent}
    req = self.session.get('http://' + self.domain, headers=headers).content
    soup = BeautifulSoup(req)
    for i in soup.find('table', cellpadding=2).find_all('tr'):  # Get teamid from the bets
        team1 = i.find('a')['title']
        team2 = i.find_all('a')[1]['title']
        if (team == team1):
            # id is everything after 'cid=' in the first link's href
            return i.find('a')['href'].split('cid=')[1]
        elif (team == team2):
            return i.find_all('a')[1]['href'].split('cid=')[1]
    return None
def _spawn_new(self, count=1):
    """Spawn some new tiles on randomly chosen free cells.

    Each spawned tile is a 2 with probability 10/11, otherwise a 4.
    """
    empty = self.free_cells()
    how_many = min(count, len(empty))
    for col, row in random.sample(empty, how_many):
        self.grid[row][col] = 2 if random.randint(0, 10) else 4
def version(self):
    """Return version from installed packages, or "" when none was found."""
    if not self.find:
        return ""
    return self.meta.sp + split_package(self.find)[1]
def get_header_guard_dmlc(filename):
    """Get Header Guard Convention for DMLC Projects.

    For headers in include, directly use the path.
    For headers in src, use project name plus path.

    Examples: with project-name = dmlc
      include/dmlc/timer.h -> DMLC_TIMTER_H_
      src/io/libsvm_parser.h -> DMLC_IO_LIBSVM_PARSER_H_
    """
    fileinfo = cpplint.FileInfo(filename)
    rel_path = fileinfo.RepositoryName()
    src_idx = rel_path.find('src/')
    if src_idx != -1 and _HELPER.project_name is not None:
        # headers under src/: prefix the project name, keeping the separator
        rel_path = _HELPER.project_name + rel_path[src_idx + 3:]
    else:
        # headers under an include-style directory: strip that leading dir
        for spath in ['include', 'api', 'wrapper']:
            prefix = spath + os.sep
            if rel_path.startswith(prefix):
                rel_path = re.sub('^' + prefix, '', rel_path)
                break
    return re.sub(r'[-./\s]', '_', rel_path).upper() + '_'
def top(self, topn, by='counts'):
    """Get the top ``topn`` features in the :class:`.FeatureSet`.

    Parameters
    ----------
    topn : int
        Number of features to return.
    by : str
        (default: 'counts') How features should be sorted. Must be 'counts'
        or 'documentCounts' (case-sensitive; matches the attribute name).

    Returns
    -------
    list
        ``(feature, value)`` pairs in descending order of ``value``.
    """
    if by not in ['counts', 'documentCounts']:
        raise NameError('kwarg `by` must be "counts" or "documentCounts"')
    cvalues = getattr(self, by)
    # indices of the topn largest values, descending
    order = argsort(list(cvalues.values()))[::-1][:topn]
    keys = list(cvalues.keys())
    return [(self.index[keys[i]], cvalues[keys[i]]) for i in order]
def main(self):
    """Generates an output string by replacing the keywords in the format
    string with the corresponding values from a submission dictionary."""
    self.manage_submissions()
    out_string = self.options['format']
    # Pop until we get something which len(title) <= max-chars
    # NOTE(review): if every remaining submission title exceeds max_chars,
    # pop() will eventually raise IndexError on an empty list — confirm
    # manage_submissions() guarantees a fitting entry.
    length = float('inf')
    while length > self.options['max_chars']:
        self.selected_submission = self.submissions.pop()
        length = len(self.selected_submission['title'])
    # raw keyword substitution: each submission key is replaced wherever it
    # appears in the format string (values HTML-unescaped first)
    for k, v in self.selected_submission.items():
        out_string = out_string.replace(k, self.h.unescape(str(v)))
    return self.output(out_string, out_string)
def corners_to_keypoints(corners):
    """Convert corners from cv2.goodFeaturesToTrack into cv2.KeyPoint objects.

    Returns an empty list when `corners` is None.
    """
    if corners is None:
        return []
    return [cv2.KeyPoint(corner[0][0], corner[0][1], 1) for corner in corners]
def execute_operation(self, operation: OperationDefinitionNode, root_value: Any) -> Optional[AwaitableOrValue[Any]]:
    """Execute an operation.

    Implements the "Evaluating operations" section of the spec. Errors are
    collected on ``self.errors`` and None is returned for a failed
    operation; the result may be an awaitable.
    """
    type_ = get_operation_root_type(self.schema, operation)
    fields = self.collect_fields(type_, operation.selection_set, {}, set())
    path = None
    # Errors from sub-fields of a NonNull type may propagate to the top level, at
    # which point we still log the error and null the parent field, which in this
    # case is the entire response.
    # Similar to complete_value_catching_error.
    try:
        # mutations must execute their root fields serially; all other
        # operations may execute fields in any order
        result = (self.execute_fields_serially if operation.operation == OperationType.MUTATION else self.execute_fields)(type_, root_value, path, fields)
    except GraphQLError as error:
        self.errors.append(error)
        return None
    except Exception as error:
        # wrap non-GraphQL exceptions so clients see a consistent error shape
        error = GraphQLError(str(error), original_error=error)
        self.errors.append(error)
        return None
    else:
        if isawaitable(result):
            # noinspection PyShadowingNames
            async def await_result():
                # same error handling as above, applied when the awaitable resolves
                try:
                    return await result
                except GraphQLError as error:
                    self.errors.append(error)
                except Exception as error:
                    error = GraphQLError(str(error), original_error=error)
                    self.errors.append(error)
            return await_result()
        return result
def readline(file, skip_blank=False):
    """Read a line from `file`, skipping comment lines.

    Lines whose first character is '#' are always skipped; whitespace-only
    lines are skipped only when `skip_blank` is True (the previous docstring
    incorrectly claimed blank lines were always skipped).

    :param file: any object with a ``readline()`` method returning '' at EOF.
    :param skip_blank: also skip whitespace-only lines when True.
    :return: the next accepted line (trailing newline kept), or None at EOF.
    """
    while True:
        line = file.readline()
        if not line:
            return None  # EOF
        if line[0] != '#' and not (skip_blank and line.isspace()):
            return line
def get_type_item(self, value, map_name=None, instances=None):
    """Converts the input to a InputConfigId tuple.

    It can be from a single string, list, or tuple. Single values (also
    single-element lists or tuples) are considered to be a container
    configuration on the default map. A string with two elements separated by
    a dot or two-element lists/tuples are considered to be referring to a
    specific map and configuration. Three strings concatenated with a dot or
    three-element lists/tuples are considered to be referring to a map,
    configuration, and instances. Multiple instances can be specified in the
    third element by passing a tuple or list.

    :param value: Input value for conversion.
    :param map_name: Map name; provides the default map name unless otherwise specified in ``value``.
    :type map_name: unicode | str
    :param instances: Instance names; instances to set if not otherwise specified in ``value``.
    :type instances: unicode | str | tuple[unicode | str | NoneType]
    :return: InputConfigId tuple.
    :rtype: InputConfigId
    """
    if isinstance(value, InputConfigId):
        # already in target form
        return value
    elif isinstance(value, MapConfigId):
        # convert a MapConfigId, preferring its own instance name
        if value.instance_name:
            v_instances = value.instance_name,
        else:
            v_instances = None
        return InputConfigId(value.config_type, value.map_name, value.config_name, v_instances or instances)
    elif isinstance(value, six.string_types):
        # dotted string: "config", "map.config", or "map.config.instance"
        s_map_name, __, s_config_name = value.partition('.')
        if s_config_name:
            config_name, __, s_instance = s_config_name.partition('.')
            if s_instance:
                s_instances = s_instance,
            else:
                s_instances = None
        else:
            # no dot: the single element is the config name on the default map
            config_name = s_map_name
            s_map_name = map_name
            s_instances = None
        return InputConfigId(ItemType.CONTAINER, s_map_name, config_name, s_instances or instances)
    elif isinstance(value, (tuple, list)):
        v_len = len(value)
        if v_len == 3:
            # (map, config, instance-or-instances)
            v_instances = value[2]
            if not v_instances:
                return InputConfigId(ItemType.CONTAINER, value[0], value[1])
            if isinstance(v_instances, tuple):
                return InputConfigId(ItemType.CONTAINER, *value)
            elif isinstance(v_instances, list):
                return InputConfigId(ItemType.CONTAINER, value[0], value[1], tuple(v_instances))
            elif isinstance(v_instances, six.string_types):
                return InputConfigId(ItemType.CONTAINER, value[0], value[1], (v_instances,))
            raise ValueError("Invalid type of instance specification in '{0}'; expected a list, tuple, or string type, " "found {1}.".format(value, type(v_instances).__name__), v_instances)
        elif v_len == 2:
            return InputConfigId(ItemType.CONTAINER, value[0] or map_name, value[1], instances)
        elif v_len == 1:
            return InputConfigId(ItemType.CONTAINER, map_name, value[0], instances)
        raise ValueError("Invalid element length; only tuples and lists of length 1-3 can be converted to a " "InputConfigId tuple. Found length {0}.".format(v_len))
    elif isinstance(value, dict):
        # dict input overrides the defaults key-by-key
        kwargs = {'config_type': ItemType.CONTAINER, 'map_name': map_name, 'instance_names': instances, }
        kwargs.update(value)
        return InputConfigId(**kwargs)
    raise ValueError("Invalid type; expected a list, tuple, dict, or string type, found {0}.".format(type(value).__name__))
def edgelist_to_adjacency(edgelist):
    """Converts an iterator of edges to an adjacency dict.

    Args:
        edgelist (iterable):
            An iterator over 2-tuples where each 2-tuple is an edge.

    Returns:
        dict: The adjacency dict. A dict of the form {v: Nv, ...} where v is
        a node in a graph and Nv is the neighbors of v as a set.
    """
    adjacency = {}
    for u, v in edgelist:
        # setdefault replaces the explicit membership branches; behavior is
        # identical, including self-loops ({u: {u}})
        adjacency.setdefault(u, set()).add(v)
        adjacency.setdefault(v, set()).add(u)
    return adjacency
def create_regex_patterns ( symbols ) :
u"""create regex patterns for text , google , docomo , kddi and softbank via ` symbols `
create regex patterns for finding emoji character from text . the pattern character use
` unicode ` formatted character so you have to decode text which is not decoded .""" | pattern_unicode = [ ]
pattern_google = [ ]
pattern_docomo = [ ]
pattern_kddi = [ ]
pattern_softbank = [ ]
for x in symbols :
if x . unicode . code :
pattern_unicode . append ( re . escape ( unicode ( x . unicode ) ) )
if x . google . code :
pattern_google . append ( re . escape ( unicode ( x . google ) ) )
if x . docomo . code :
pattern_docomo . append ( re . escape ( unicode ( x . docomo ) ) )
if x . kddi . code :
pattern_kddi . append ( re . escape ( unicode ( x . kddi ) ) )
if x . softbank . code :
pattern_softbank . append ( re . escape ( unicode ( x . softbank ) ) )
# pattern _ unicode = re . compile ( u " [ % s ] " % u ' ' . join ( pattern _ unicode ) )
# pattern _ google = re . compile ( u " [ % s ] " % u ' ' . join ( pattern _ google ) )
# pattern _ docomo = re . compile ( u " [ % s ] " % u ' ' . join ( pattern _ docomo ) )
# pattern _ kddi = re . compile ( u " [ % s ] " % u ' ' . join ( pattern _ kddi ) )
# pattern _ softbank = re . compile ( u " [ % s ] " % u ' ' . join ( pattern _ softbank ) )
pattern_unicode = re . compile ( u"%s" % u'|' . join ( pattern_unicode ) )
pattern_google = re . compile ( u"%s" % u'|' . join ( pattern_google ) )
pattern_docomo = re . compile ( u"%s" % u'|' . join ( pattern_docomo ) )
pattern_kddi = re . compile ( u"%s" % u'|' . join ( pattern_kddi ) )
pattern_softbank = re . compile ( u"%s" % u'|' . join ( pattern_softbank ) )
return { # forward reverse
'text' : ( None , pattern_unicode ) , 'docomo_img' : ( None , pattern_unicode ) , 'kddi_img' : ( None , pattern_unicode ) , 'softbank_img' : ( None , pattern_unicode ) , 'google' : ( pattern_google , pattern_unicode ) , 'docomo' : ( pattern_docomo , pattern_unicode ) , 'kddi' : ( pattern_kddi , pattern_unicode ) , 'softbank' : ( pattern_softbank , pattern_unicode ) , } |
def dquoteEscape(param):
    """Return param, or "param" if ',' or ';' or ':' is in param.

    Raises VObjectError when param itself contains a double quote, since
    double quotes cannot appear in parameter values.
    """
    if '"' in param:
        raise VObjectError("Double quotes aren't allowed in parameter values.")
    needs_quoting = any(char in param for char in ',;:')
    return '"' + param + '"' if needs_quoting else param
def decrypt(self, data, decode=False):
    """Decrypt the given data with cipher that is got from AES.cipher call.

    :param data: data to decrypt
    :param decode: whether to decode bytes to str or not
    :return: bytes or str (depends on decode flag)
    """
    # result = self.cipher().decrypt(data)
    result = self.cipher().decrypt_block(data)
    padding = self.mode().padding()
    if padding is not None:
        # strip the padding that was appended at encryption time
        result = padding.reverse_pad(result, WAESMode.__data_padding_length__)
    return result.decode() if decode else result
def convConn(self, preCellsTags, postCellsTags, connParam):
    from .. import sim
    '''Generates connections between all pre and post-syn cells based on probability values'''
    if sim.cfg.verbose:
        print('Generating set of convergent connections (rule: %s) ...' % (connParam['label']))
    # get list of params that have a lambda function
    paramsStrFunc = [param for param in [p + 'Func' for p in self.connStringFuncParams] if param in connParam]
    # copy the vars into args immediately and work out which keys are associated with lambda functions only once per method
    funcKeys = {}
    for paramStrFunc in paramsStrFunc:
        connParam[paramStrFunc + 'Args'] = connParam[paramStrFunc + 'Vars'].copy()
        funcKeys[paramStrFunc] = [key for key in connParam[paramStrFunc + 'Vars'] if callable(connParam[paramStrFunc + 'Vars'][key])]
    # converted to list only once
    preCellsTagsKeys = sorted(preCellsTags)
    # calculate hash for pre cell gids (used to seed the per-postcell randomizer)
    hashPreCells = sim.hashList(preCellsTagsKeys)
    for postCellGid, postCellTags in postCellsTags.items():  # for each postsyn cell
        if postCellGid in self.gid2lid:  # check if postsyn is in this node
            # num of presyn conns / postsyn cell (possibly per-cell via convergenceFunc)
            convergence = connParam['convergenceFunc'][postCellGid] if 'convergenceFunc' in connParam else connParam['convergence']
            convergence = max(min(int(round(convergence)), len(preCellsTags) - 1), 0)
            self.rand.Random123(hashPreCells, postCellGid, sim.cfg.seeds['conn'])  # init randomizer
            randSample = self.randUniqueInt(self.rand, convergence + 1, 0, len(preCellsTags) - 1)
            # note: randSample[convergence] is an extra value used only if one of the random preGids coincided with the postGid
            preCellsSample = {preCellsTagsKeys[randSample[convergence]] if preCellsTagsKeys[i] == postCellGid else preCellsTagsKeys[i]: 0 for i in randSample[0:convergence]}  # dict of selected gids of presyn cells with post gid replaced
            preCellsConv = {k: v for k, v in preCellsTags.items() if k in preCellsSample}  # dict of selected presyn cells tags
            for preCellGid, preCellTags in preCellsConv.items():  # for each presyn cell
                for paramStrFunc in paramsStrFunc:  # call lambda functions to get weight func args
                    # update the relevant FuncArgs dict where lambda functions are known to exist in the corresponding FuncVars dict
                    for funcKey in funcKeys[paramStrFunc]:
                        connParam[paramStrFunc + 'Args'][funcKey] = connParam[paramStrFunc + 'Vars'][funcKey](preCellTags, postCellTags)
                if preCellGid != postCellGid:  # if not self-connection
                    self._addCellConn(connParam, preCellGid, postCellGid)
def references_json_unknown_details ( ref_content , soup = None ) :
"Extract detail value for references of type unknown" | details = ""
# Try adding pages values first
if "pages" in ref_content :
if "range" in ref_content [ "pages" ] :
details += ref_content [ "pages" ] [ "range" ]
else :
details += ref_content [ "pages" ]
if soup : # Attempt to find the XML element by id , and convert it to details
if "id" in ref_content :
ref_tag = first ( soup . select ( "ref#" + ref_content [ "id" ] ) )
if ref_tag : # Now remove tags that would be already part of the unknown reference by now
for remove_tag in [ "person-group" , "year" , "article-title" , "elocation-id" , "fpage" , "lpage" ] :
ref_tag = remove_tag_from_tag ( ref_tag , remove_tag )
# Add the remaining tag content comma separated
for tag in first ( raw_parser . element_citation ( ref_tag ) ) :
if node_text ( tag ) is not None :
if details != "" :
details += ", "
details += node_text ( tag )
if details == "" :
return None
else :
return details |
def GetParentFileEntry(self):
    """Retrieves the parent file entry.

    Returns:
        OSFileEntry: parent file entry or None if not available.
    """
    location = getattr(self.path_spec, 'location', None)
    if location is None:
        return None
    parent_location = self._file_system.DirnamePath(location)
    if parent_location is None:
        return None
    # an empty dirname means the parent is the file system root
    path_spec = os_path_spec.OSPathSpec(
        location=parent_location or self._file_system.PATH_SEPARATOR)
    return OSFileEntry(self._resolver_context, self._file_system, path_spec)
def _defgate(self, program, gate_name, gate_matrix):
    """Defines a gate named gate_name with matrix gate_matrix in program.

    In addition, updates self.defined_gates to track what has been defined.

    :param Program program: Pyquil Program to add the defgate and gate to.
    :param str gate_name: The name of the gate to add to program.
    :param numpy.ndarray gate_matrix: The array corresponding to the gate to define.
    :return: the modified Program.
    :rtype: Program
    """
    new_program = pq.Program()
    new_program += program
    # already defined: return the copy untouched
    if gate_name in self.defined_gates:
        return new_program
    new_program.defgate(gate_name, gate_matrix)
    self.defined_gates.add(gate_name)
    return new_program
def role_create(role, owner=None, grants=None, **kwargs):
    '''
    Creates a new database role.

    If no owner is specified, the role will be owned by the user that
    executes CREATE ROLE, which is the user argument or mssql.user option.

    grants is list of strings.

    CLI Example:

    .. code-block:: bash

        salt minion mssql.role_create role=product01 owner=sysdba grants='["SELECT", "INSERT", "UPDATE", "DELETE", "EXECUTE"]'
    '''
    if not grants:
        grants = []
    # NOTE(review): role/owner/grant values are interpolated directly into the
    # SQL text (T-SQL DDL cannot be parameterized); these values come from the
    # salt CLI and must be trusted — confirm callers never pass external input.
    sql = 'CREATE ROLE {0}'.format(role)
    if owner:
        sql += ' AUTHORIZATION {0}'.format(owner)
    conn = None
    try:
        conn = _get_connection(**kwargs)
        conn.autocommit(True)
        # cur = conn.cursor()
        # cur.execute(sql)
        conn.cursor().execute(sql)
        for grant in grants:
            conn.cursor().execute('GRANT {0} TO [{1}]'.format(grant, role))
    except Exception as e:
        # salt convention: report failure as a message string, not an exception
        return 'Could not create the role: {0}'.format(e)
    finally:
        if conn:
            conn.autocommit(False)
            conn.close()
    return True
def sortlevel(self, level=None, ascending=True, sort_remaining=None):
    """For internal compatibility with the Index API.

    Sort the Index. This is for compat with MultiIndex

    Parameters
    ----------
    ascending : boolean, default True
        False to sort in descending order

    level, sort_remaining are compat parameters

    Returns
    -------
    Index
    """
    # `level` and `sort_remaining` are accepted for signature compatibility
    # only; the sort always delegates to sort_values with an indexer.
    return self.sort_values(return_indexer=True, ascending=ascending)
def validate_set_ops(df, other):
    """Helper function to ensure that DataFrames are valid for set operations.

    Columns must have the same names in the same order, and indices must be
    of the same dimension with the same names.

    :param df: left DataFrame.
    :param other: right DataFrame.
    :returns: None when the frames are compatible.
    :raises ValueError: if columns differ, or index dimensions/names differ.
    """
    if df.columns.values.tolist() != other.columns.values.tolist():
        # report column mismatches in both directions
        not_in_df = [col for col in other.columns if col not in df.columns]
        not_in_other = [col for col in df.columns if col not in other.columns]
        error_string = 'Error: not compatible.'
        if len(not_in_df):
            error_string += ' Cols in y but not x: ' + str(not_in_df) + '.'
        if len(not_in_other):
            error_string += ' Cols in x but not y: ' + str(not_in_other) + '.'
        raise ValueError(error_string)
    if len(df.index.names) != len(other.index.names):
        raise ValueError('Index dimension mismatch')
    if df.index.names != other.index.names:
        raise ValueError('Index mismatch')
def flatten(ele):
    """flatten recursively defined list,
    e.g. [1,2,3,[4,5],[6,[8,9,[10,[11,'x']]]]]

    :param ele: recursive list, i.e. list in list in list ...
    :return: generator object
    """
    for el in ele:
        if isinstance(el, (list, tuple)):
            # recurse via the class attribute, as in the original call
            # (this function presumably lives on the Models class)
            yield from Models.flatten(el)
        else:
            yield el
def inserir(self, id_equipment, id_script):
    """Inserts a new Related Equipment with Script and returns its identifier.

    :param id_equipment: Identifier of the Equipment. Integer value and greater than zero.
    :param id_script: Identifier of the Script. Integer value and greater than zero.

    :return: Dictionary with the following structure:
        {'equipamento_roteiro': {'id': < id_equipment_script >}}

    :raise InvalidParameterError: The identifier of Equipment or Script is null and invalid.
    :raise RoteiroNaoExisteError: Script not registered.
    :raise EquipamentoNaoExisteError: Equipment not registered.
    :raise EquipamentoRoteiroError: Equipment is already associated with the script.
    :raise DataBaseError: Networkapi failed to access the database.
    :raise XMLError: Networkapi failed to generate the XML response
    """
    payload = {'id_equipment': id_equipment, 'id_script': id_script}
    code, xml = self.submit({'equipment_script': payload}, 'POST', 'equipmentscript/')
    return self.response(code, xml)
def password_entropy(length: int, chars: str) -> float:
    """Calculate the entropy of a password with given length and chars.

    :param length: number of characters in the password; must be >= 0.
    :param chars: alphabet the password is drawn from; must be non-empty.
    :return: entropy in bits (0.0 for a zero-length password).
    :raises TypeError: if length is not an int or chars is not a str.
    :raises ValueError: if length is negative or chars is empty.
    """
    if not isinstance(length, int):
        raise TypeError('length can only be int')
    if length < 0:
        # message fixed: 0 is accepted, so "greater than 0" was inaccurate
        raise ValueError('length should be greater than or equal to 0')
    if not isinstance(chars, str):
        raise TypeError('chars can only be string')
    if not chars:
        raise ValueError("chars can't be null")
    if length == 0:
        return 0.0
    entropy_c = entropy_bits(list(chars))
    return float(length * entropy_c)
def serveWeek(self, request, year=None, week=None):
    """Weekly calendar view.

    Renders the events for ISO week `week` of `year`, defaulting to the
    current week. Raises Http404 for week 53 of a 52-week year.
    """
    myurl = self.get_url(request)
    def myUrl(urlYear, urlWeek):
        # links outside the supported 1900-2099 range resolve to None
        if (urlYear < 1900 or urlYear > 2099 or urlYear == 2099 and urlWeek == 53):
            return None
        # clamp week 53 links for years with only 52 weeks
        if urlWeek == 53 and num_weeks_in_year(urlYear) == 52:
            urlWeek = 52
        return myurl + self.reverse_subpage('serveWeek', args=[urlYear, urlWeek])
    today = timezone.localdate()
    thisYear, thisWeekNum, dow = gregorian_to_week_date(today)
    if year is None:
        year = thisYear
    if week is None:
        week = thisWeekNum
    year = int(year)
    week = int(week)
    firstDay, lastDay, prevYearNumWeeks, yearNumWeeks = week_info(year, week)
    if week == 53 and yearNumWeeks == 52:
        raise Http404("Only 52 weeks in {}".format(year))
    eventsInWeek = self._getEventsByDay(request, firstDay, lastDay)
    if firstDay.year >= 1900:
        monthlyUrl = myurl + self.reverse_subpage('serveMonth', args=[firstDay.year, firstDay.month])
    else:
        monthlyUrl = myurl + self.reverse_subpage('serveMonth', args=[1900, 1])
    listUrl = myurl + self.reverse_subpage('serveUpcoming')
    # wrap previous/next week numbers across year boundaries
    prevWeek = week - 1
    prevWeekYear = year
    if prevWeek == 0:
        prevWeek = prevYearNumWeeks
        prevWeekYear -= 1
    nextWeek = week + 1
    nextWeekYear = year
    if nextWeek > yearNumWeeks:
        nextWeek = 1
        nextWeekYear += 1
    # TODO Consider changing to a TemplateResponse
    # https://stackoverflow.com/questions/38838601
    return render(request, "joyous/calendar_week.html", {'self': self, 'page': self, 'version': __version__, 'year': year, 'week': week, 'today': today, 'yesterday': today - dt.timedelta(1), 'prevWeekUrl': myUrl(prevWeekYear, prevWeek), 'nextWeekUrl': myUrl(nextWeekYear, nextWeek), 'prevYearUrl': myUrl(year - 1, week), 'nextYearUrl': myUrl(year + 1, week), 'thisWeekUrl': myUrl(thisYear, thisWeekNum), 'monthlyUrl': monthlyUrl, 'listUrl': listUrl, 'weekName': _("Week {weekNum}").format(weekNum=week), 'weekdayAbbr': weekday_abbr, 'events': [eventsInWeek]})
def edit_ticket_links(self, ticket_id, **kwargs):
    """Edit ticket links.

    .. warning:: This method is deprecated in favour of edit_link method, because
                 there exists bug in RT 3.8 REST API causing mapping created links to
                 ticket/1. The only drawback is that edit_link cannot process multiple
                 links all at once.

    :param ticket_id: ID of ticket to edit
    :keyword kwargs: Other arguments possible to set: DependsOn,
                     DependedOnBy, RefersTo, ReferredToBy, Members,
                     MemberOf. Each value should be either a ticket ID or an
                     external link. Int types are converted. Use an empty
                     string as value to delete an existing link.
    :returns: ``True``
                  Operation was successful
              ``False``
                  Ticket with given ID does not exist or unknown parameter
                  was set (in this case all other valid fields are changed)
    """
    # Build the RT REST "content" payload: one "Key: value" line per link.
    post_data = ''
    for key in kwargs:
        post_data += "{}: {}\n".format(key, str(kwargs[key]))
    msg = self.__request('ticket/{}/links'.format(str(ticket_id),),
                         post_data={'content': post_data})
    # The status of interest is on the third line of the RT response.
    state = msg.split('\n')[2]
    return self.RE_PATTERNS['links_updated_pattern'].match(state) is not None
def setup_datafind_server_connection(cp, tags=None):
    """Open a connection to the datafind server.

    Parameters
    ----------
    cp : pycbc.workflow.configuration.WorkflowConfigParser
        The in-memory representation of the ConfigParser.
    tags : list, optional
        Configuration tags used when looking up the server option.

    Returns
    -------
    connection
        The open connection to the datafind server.
    """
    tags = [] if tags is None else tags
    section = "workflow-datafind"
    option = "datafind-ligo-datafind-server"
    # When no server is configured, pass None and let datafind_connection
    # pick its default.
    datafind_server = None
    if cp.has_option_tags(section, option, tags):
        datafind_server = cp.get_opt_tags(section, option, tags)
    return datafind_connection(datafind_server)
def is_seq(arg):
    '''is_seq(arg) yields True if arg is a sequential collection otherwise False; i.e., it must be a
    list, tuple, persistent vector, persistent list, or numpy array.

    Note that strings are not considered sequences.'''
    if isinstance(arg, (list_type, tuple_type, pyr.PVector, pyr.PList)):
        return True
    return is_nparray(arg)
def bm3_k(p, v0, k0, k0p):
    """Calculate bulk modulus; thin wrapper for cal_k_bm3.

    Cannot handle uncertainties.

    :param p: pressure
    :param v0: volume at reference conditions
    :param k0: bulk modulus at reference conditions
    :param k0p: pressure derivative of bulk modulus at reference conditions
    :return: bulk modulus at high pressure
    """
    # cal_k_bm3 expects the EOS parameters packed as [v0, k0, k0p].
    return cal_k_bm3(p, [v0, k0, k0p])
def _file_path(self, src_path, dest, regex):
    """Check src_path complies with regex and generate the new filename.

    :param src_path: source path of the downloaded file
    :param dest: destination template; ``{name}`` placeholders are replaced
        with the regex's named groups
    :param regex: pattern applied to ``src_path``
    :return: resolved path under ``self.download_root``
    :raise GrablibError: if the resolved destination is empty
    """
    # NOTE(review): re.search may return None when the regex does not match,
    # which would raise AttributeError on m.groupdict() below -- presumably
    # callers guarantee a match; confirm upstream.
    m = re.search(regex, src_path)
    if dest.endswith('/') or dest == '':
        dest += '(unknown)'
    names = m.groupdict()
    # Fall back to the last positional group as the filename when the
    # pattern has no named groups.
    if not names and m.groups():
        names = {'filename': m.groups()[-1]}
    for name, value in names.items():
        dest = dest.replace('{%s}' % name, value)
    # remove starting slash so path can't be absolute
    dest = dest.strip(' /')
    if not dest:
        progress_logger.error('destination path must not resolve to be null')
        raise GrablibError('bad path')
    new_path = self.download_root.joinpath(dest)
    # Sanity check: raises ValueError if the result escapes download_root.
    new_path.relative_to(self.download_root)
    return new_path
async def updateWorkerType(self, *args, **kwargs):
    """Update Worker Type.

    Provide a new copy of a worker type to replace the existing one.
    This will overwrite the existing worker type definition if there
    is already a worker type of that name. This method will return a
    200 response along with a copy of the worker type definition created.

    Note that if you are using the result of a GET on the worker-type
    end point you will need to delete the lastModified and workerType
    keys from the object returned, since those fields are not allowed in
    the request body for this method.

    Otherwise, all input requirements and actions are the same as the
    create method.

    This method takes input: ``http://schemas.taskcluster.net/aws-provisioner/v1/create-worker-type-request.json#``
    This method gives output: ``http://schemas.taskcluster.net/aws-provisioner/v1/get-worker-type-response.json#``

    This method is ``stable``
    """
    # Thin wrapper: request building, auth and retries happen in _makeApiCall.
    return await self._makeApiCall(self.funcinfo["updateWorkerType"], *args, **kwargs)
def _set_shutdown_management_oper(self, v, load=False):
    """Setter method for shutdown_management_oper, mapped from YANG variable
    /interface/management/shutdown_management_oper (string).

    If this variable is read-only (config: false) in the source YANG file,
    then _set_shutdown_management_oper is considered as a private method.
    Backends looking to populate this variable should do so via calling
    thisObj._set_shutdown_management_oper() directly.

    YANG Description: Show the status of this management interface.
    """
    # Convert union-typed wrappers to their concrete value first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=unicode, is_leaf=True, yang_name="shutdown_management_oper", rest_name="oper-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Show the status of this management interface.', u'alt-name': u'oper-status'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError({'error-string': """shutdown_management_oper must be of a type compatible with string""", 'defined-type': "string", 'generated-type': """YANGDynClass(base=unicode, is_leaf=True, yang_name="shutdown_management_oper", rest_name="oper-status", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Show the status of this management interface.', u'alt-name': u'oper-status'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='string', is_config=False)""", })
    self.__shutdown_management_oper = t
    # Notify the parent container, if it supports change notification.
    if hasattr(self, '_set'):
        self._set()
def job_factory(self):
    """Create concrete jobs from the configured plugins.

    The returned mapping has the form::

        jobs = {
            'SECTION-build_items': {
                'method': FUNCTION_OBJECT,
                'interval': INTERVAL_TIME,
            },
            ...
        }

    If a ConcreteJob instance has "build_discovery_items", that method is
    added to jobs as well (scheduled with lld_interval).

    warn: looped_method is deprecated in 0.4.0.
    You should implement "build_items" instead of "looped_method".
    In most cases you need only to change the method name.
    """
    jobs = dict()
    for section, options in self.config.items():
        if section == 'global':
            continue
        # Since validate in utils/configread, does not occur here Error
        # In the other sections are global,
        # that there is a "module" option is collateral.
        plugin_name = options['module']
        job_kls = self.plugins[plugin_name]
        if hasattr(job_kls, '__init__'):
            # Only pass stats_queue to plugins whose constructor accepts it.
            job_argspec = inspect.getargspec(job_kls.__init__)
            if 'stats_queue' in job_argspec.args:
                job_obj = job_kls(options=options, queue=self.queue,
                                  stats_queue=self.stats_queue, logger=self.logger)
            else:
                job_obj = job_kls(options=options, queue=self.queue, logger=self.logger)
        # Deprecated!!
        # Legacy plugins expose "looped_method"; schedule it but warn.
        if hasattr(job_obj, 'looped_method'):
            self.logger.warn(('{0}\'s "looped_method" is deprecated.'
                              'Pleases change method name to "build_items"'
                              ''.format(plugin_name)))
            name = '-'.join([section, 'looped_method'])
            # Per-section interval wins over the global one; default 60s.
            interval = 60
            if 'interval' in options:
                interval = options['interval']
            elif 'interval' in self.config['global']:
                interval = self.config['global']['interval']
            jobs[name] = {'method': job_obj.looped_method, 'interval': interval, }
        if hasattr(job_obj, 'build_items'):
            name = '-'.join([section, 'build_items'])
            interval = 60
            if 'interval' in options:
                interval = options['interval']
            elif 'interval' in self.config['global']:
                interval = self.config['global']['interval']
            jobs[name] = {'method': job_obj.build_items, 'interval': interval, }
            self.logger.info('load plugin {0} (interval {1})'
                             ''.format(plugin_name, interval))
        if hasattr(job_obj, 'build_discovery_items'):
            name = '-'.join([section, 'build_discovery_items'])
            # Low-level discovery runs less often; default 600s.
            lld_interval = 600
            if 'lld_interval' in options:
                lld_interval = options['lld_interval']
            elif 'lld_interval' in self.config['global']:
                lld_interval = self.config['global']['lld_interval']
            jobs[name] = {'method': job_obj.build_discovery_items, 'interval': lld_interval, }
            self.logger.info('load plugin {0} (lld_interval {1})'
                             ''.format(plugin_name, lld_interval))
    return jobs
def clear_cache():
    '''Clear out cached state files, forcing even cache runs to refresh the cache
    on the next state execution.

    Remember that the state cache is completely disabled by default, this
    execution only applies if cache=True is used in states

    CLI Example:

    .. code-block:: bash

        salt '*' state.clear_cache
    '''
    removed = []
    cache_dir = __opts__['cachedir']
    for entry in os.listdir(cache_dir):
        if not entry.endswith('.cache.p'):
            continue
        full_path = os.path.join(cache_dir, entry)
        # Skip directories and anything that vanished in the meantime.
        if os.path.isfile(full_path):
            os.remove(full_path)
            removed.append(entry)
    return removed
def list_expand(d, prefix=None):
    """Recursively expand dictionaries into lists.

    e.g. list_expand({1: {2: [3, 4]}, 5: [6]}) == [(1, 2, 3), (1, 2, 4), (5, 6)]
    """
    if prefix is None:
        prefix = tuple()
    # The isinstance check is invariant per call, so branch once up front.
    if isinstance(d, dict):
        for key in d:
            for expanded in list_expand(d[key], prefix=tuple(chain(prefix, (key,)))):
                yield expanded
    else:
        for leaf in d:
            yield tuple(chain(prefix, make_list(leaf)))
def explain(self):
    """A debugging API, exposing SQLite's I{EXPLAIN} statement.

    While this is not a private method, you also probably don't have any
    use for it unless you understand U{SQLite
    opcodes<http://www.sqlite.org/opcode.html>} very well.

    Once you do, it can be handy to call this interactively to get a sense
    of the complexity of a query.

    @return: a list, the first element of which is a L{str} (the SQL
        statement which will be run), and the remainder of which is 3-tuples
        resulting from the I{EXPLAIN} of that statement.
    """
    statement = self._sqlAndArgs('SELECT', self._queryTarget)[0]
    opcodes = self._runQuery('EXPLAIN SELECT', self._queryTarget)
    return [statement] + opcodes
def _init_codebook ( self ) :
"""Internal function to set the codebook or to indicate it to the C + +
code that it should be randomly initialized .""" | codebook_size = self . _n_columns * self . _n_rows * self . n_dim
if self . codebook is None :
if self . _initialization == "random" :
self . codebook = np . zeros ( codebook_size , dtype = np . float32 )
self . codebook [ 0 : 2 ] = [ 1000 , 2000 ]
else :
self . _pca_init ( )
elif self . codebook . size != codebook_size :
raise Exception ( "Invalid size for initial codebook" )
else :
if self . codebook . dtype != np . float32 :
print ( "Warning: initialcodebook was not float32. A 32-bit " "copy was made" )
self . codebook = np . float32 ( self . codebook )
self . codebook . shape = ( codebook_size , ) |
def get_min_sec_from_morning(self):
    """Get the first second from midnight where a timerange is effective.

    :return: smallest amount of seconds from midnight over all timeranges
    :rtype: int
    """
    # min() over a generator; raises ValueError when there are no
    # timeranges, exactly as min() over an empty list would.
    return min(timerange.get_sec_from_morning() for timerange in self.timeranges)
def get_genus_type(self):
    """Gets the genus type of this object.

    return: (osid.type.Type) - the genus type of this object
    *compliance: mandatory -- This method must be implemented.*
    """
    # Try to stand up a full Type object if its data can be found.
    # (Also need to LOOK FOR THE TYPE IN types or through type lookup)
    try:
        genus_type_identifier = Id(self._my_map['genusTypeId']).get_identifier()
        return Type(**types.Genus().get_type_data(genus_type_identifier))
    except Exception:
        # Bug fix: the original bare ``except:`` also swallowed SystemExit
        # and KeyboardInterrupt.  If the full type can't be built, fall back
        # to an id-only Type -- still useful for comparison.
        return Type(idstr=self._my_map['genusTypeId'])
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_remote_system_name(self, **kwargs):
    """Auto Generated Code.

    Builds the NETCONF XML payload for
    get-lldp-neighbor-detail/output/lldp-neighbor-detail/remote-system-name
    and dispatches it through the configured callback.
    """
    config = ET.Element("config")
    get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
    # The RPC element itself becomes the payload root.
    config = get_lldp_neighbor_detail
    output = ET.SubElement(get_lldp_neighbor_detail, "output")
    lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
    # Key leaves identifying the neighbor entry.
    local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
    local_interface_name_key.text = kwargs.pop('local_interface_name')
    remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
    remote_interface_name_key.text = kwargs.pop('remote_interface_name')
    remote_system_name = ET.SubElement(lldp_neighbor_detail, "remote-system-name")
    remote_system_name.text = kwargs.pop('remote_system_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def _cryptography_encrypt ( cipher_factory , plaintext , key , iv ) :
"""Use a cryptography cipher factory to encrypt data .
: param cipher _ factory : Factory callable that builds a cryptography Cipher
instance based on the key and IV
: type cipher _ factory : callable
: param bytes plaintext : Plaintext data to encrypt
: param bytes key : Encryption key
: param bytes IV : Initialization vector
: returns : Encrypted ciphertext
: rtype : bytes""" | encryptor = cipher_factory ( key , iv ) . encryptor ( )
return encryptor . update ( plaintext ) + encryptor . finalize ( ) |
def gradients_X(self, dL_dK, X, X2=None):
    """Compute the gradient of the objective function with respect to X.

    :param dL_dK: An array of gradients of the objective function with respect to the covariance function.
    :type dL_dK: np.ndarray (num_samples x num_inducing)
    :param X: Observed data inputs
    :type X: np.ndarray (num_samples x input_dim)
    :param X2: Observed data inputs (optional, defaults to X)
    :type X2: np.ndarray (num_inducing x input_dim)
    :return: Sum of each part's gradient contribution, same shape as X.
    """
    target = np.zeros(X.shape)
    # Idiom fix: the original used a list comprehension purely for its
    # __iadd__ side effect; an explicit accumulation loop states the intent
    # and avoids building a throwaway list.
    for part in self.parts:
        target += part.gradients_X(dL_dK, X, X2)
    return target
def _get_scale ( self ) :
"""Subclasses may override this method .""" | sx , sxy , syx , sy , ox , oy = self . transformation
return sx , sy |
def compress_dinf(angle, nodata):
    """Compress D-inf flow direction to a D8 direction pair with weight,
    following ArcGIS D8 codes.

    Args:
        angle: D-inf flow direction angle
        nodata: NoData value

    Returns:
        1. Updated Dinf value
        2. Compressed flow direction following ArcGIS D8 code rules
           (sum of the two adjacent D8 codes when the angle falls between them)
        3. Weight of the first direction
    """
    if MathClass.floatequal(angle, nodata):
        return DEFAULT_NODATA, DEFAULT_NODATA, DEFAULT_NODATA
    # If the angle aligns exactly with a single D8 direction, that
    # direction takes the full weight.
    taud, d = DinfUtil.check_orthogonal(angle)
    if d != -1:
        return taud, d, 1
    # Otherwise locate the 45-degree slice the angle falls in; the
    # compressed code is the sum of the two neighbouring ArcGIS D8 codes,
    # and a1 is the offset into that slice.
    if angle < FlowModelConst.ne:
        a1 = angle
        d = 129
        # 1 + 128
    elif angle < FlowModelConst.n:
        a1 = angle - FlowModelConst.ne
        d = 192
        # 128 + 64
    elif angle < FlowModelConst.nw:
        a1 = angle - FlowModelConst.n
        d = 96
        # 64 + 32
    elif angle < FlowModelConst.w:
        a1 = angle - FlowModelConst.nw
        d = 48
        # 32 + 16
    elif angle < FlowModelConst.sw:
        a1 = angle - FlowModelConst.w
        d = 24
        # 16 + 8
    elif angle < FlowModelConst.s:
        a1 = angle - FlowModelConst.sw
        d = 12
        # 8 + 4
    elif angle < FlowModelConst.se:
        a1 = angle - FlowModelConst.s
        d = 6
        # 4 + 2
    else:
        a1 = angle - FlowModelConst.se
        d = 3
        # 2 + 1
    # Each slice spans PI/4, so a1 / (PI/4) is the fractional weight of
    # the first direction.
    return angle, d, a1 / PI * 4.0
def newDocNodeEatName(self, ns, name, content):
    """Creation of a new node element within a document. @ns and
    @content are optional (None). NOTE: @content is supposed to
    be a piece of XML CDATA, so it allows entity references,
    but XML special chars need to be escaped first by using
    xmlEncodeEntitiesReentrant(). Use xmlNewDocRawNode() if you
    don't need entities support.

    :raise treeError: if the underlying libxml2 call fails
    """
    # Unwrap the optional namespace down to its raw C-level object.
    if ns is None:
        ns__o = None
    else:
        ns__o = ns._o
    ret = libxml2mod.xmlNewDocNodeEatName(self._o, ns__o, name, content)
    if ret is None:
        raise treeError('xmlNewDocNodeEatName() failed')
    # Wrap the returned C node pointer in a Python-level xmlNode.
    __tmp = xmlNode(_obj=ret)
    return __tmp
def to_element(change):
    """Render a resource record set change as its XML template element.

    @param change: An L{txaws.route53.interface.IRRSetChange} provider.

    @return: The L{twisted.web.template} element which describes this
        change.
    """
    return tags.Change(
        tags.Action(change.action,),
        tags.ResourceRecordSet(
            tags.Name(unicode(change.rrset.label),),
            tags.Type(change.rrset.type,),
            tags.TTL(u"{}".format(change.rrset.ttl),),
            tags.ResourceRecords(list(
                tags.ResourceRecord(tags.Value(rr.to_text()))
                # Records are sorted so the generated document is deterministic.
                for rr in sorted(change.rrset.records)))),
    )
def _check_for_invalid_keys ( fname , kwargs , compat_args ) :
"""Checks whether ' kwargs ' contains any keys that are not
in ' compat _ args ' and raises a TypeError if there is one .""" | # set ( dict ) - - > set of the dictionary ' s keys
diff = set ( kwargs ) - set ( compat_args )
if diff :
bad_arg = list ( diff ) [ 0 ]
raise TypeError ( ( "{fname}() got an unexpected " "keyword argument '{arg}'" . format ( fname = fname , arg = bad_arg ) ) ) |
def make_class(name, attrs, bases=(object,), **attributes_arguments):
    """A quick way to create a new class called *name* with *attrs*.

    :param name: The name for the new class.
    :type name: str

    :param attrs: A list of names or a dictionary of mappings of names to
        attributes.

        If *attrs* is a list or an ordered dict (:class:`dict` on Python 3.6+,
        :class:`collections.OrderedDict` otherwise), the order is deduced from
        the order of the names or attributes inside *attrs*.  Otherwise the
        order of the definition of the attributes is used.
    :type attrs: :class:`list` or :class:`dict`

    :param tuple bases: Classes that the new class will subclass.

    :param attributes_arguments: Passed unmodified to :func:`attr.s`.

    :return: A new class with *attrs*.
    :rtype: type

    .. versionadded:: 17.1.0 *bases*
    .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained.
    """
    if isinstance(attrs, dict):
        cls_dict = attrs
    elif isinstance(attrs, (list, tuple)):
        # Bare names become default attrib() definitions.
        cls_dict = dict((a, attrib()) for a in attrs)
    else:
        raise TypeError("attrs argument must be a dict or a list.")
    # __attrs_post_init__ must live on the type itself rather than be
    # treated as an attribute definition, so pull it out first.
    post_init = cls_dict.pop("__attrs_post_init__", None)
    type_ = type(name, bases, {} if post_init is None else {"__attrs_post_init__": post_init},)
    # For pickling to work, the __module__ variable needs to be set to the
    # frame where the class is created. Bypass this step in environments where
    # sys._getframe is not defined (Jython for example) or sys._getframe is not
    # defined for arguments greater than 0 (IronPython).
    try:
        type_.__module__ = sys._getframe(1).f_globals.get("__name__", "__main__")
    except (AttributeError, ValueError):
        pass
    return _attrs(these=cls_dict, **attributes_arguments)(type_)
def fill_array(self, array, weights=None):
    """Fill this histogram with a NumPy array.

    :param array: NumPy array of values to fill with
    :param weights: optional per-entry weights
    :raises ImportError: if root_numpy is not installed
    """
    try:
        # Newer root_numpy exposes fill_hist; older releases named it
        # fill_array -- accept either.
        try:
            from root_numpy import fill_hist as fill_func
        except ImportError:
            from root_numpy import fill_array as fill_func
    except ImportError:
        log.critical("root_numpy is needed for Hist*.fill_array. "
                     "Is it installed and importable?")
        raise
    fill_func(self, array, weights=weights)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.