| signature (string, length 29 – 44.1k) | implementation (string, length 0 – 85.2k) |
|---|---|
def add_section(self, name):
    """Append a section item to the model, or return an existing one.

    Arguments:
        name (str): Name of the section.

    Returns:
        The existing section whose ``name`` matches, otherwise the newly
        created and registered section item.

    Raises:
        TypeError: if ``name`` is not a string.
    """
    # Validate explicitly: `assert` is stripped under `python -O`, so it is
    # not a reliable input check.
    if not isinstance(name, str):
        raise TypeError('name must be a str, got %r' % type(name).__name__)
    # Skip existing sections: reuse rather than duplicate.
    for section in self.sections:
        if section.name == name:
            return section
    item = defaults["common"].copy()
    item["name"] = name
    item["itemType"] = "section"
    item = self.add_item(item)
    self.sections.append(item)
    return item
def get_contents_static(self, block_alias, context):
    """Return the contents of a static block for the current request.

    Looks up (and lazily caches) all non-hidden ``Block`` rows for
    ``block_alias``, split into guest/authenticated lookup tables, then
    picks a random entry whose URL pattern matches the current request.

    A block's ``url`` may be ``'*'`` (match anything), ``':ns:viewname'``
    (a resolved view name), or a regular expression matched against the
    request path.

    :param block_alias: alias of the block to render
    :param context: template context; must contain ``'request'``
    :return: block contents string, or ``''`` when nothing matches
    """
    if 'request' not in context:
        # No use in further actions as we won't ever know the current URL.
        return ''
    current_url = context['request'].path
    # Resolve current view name to support view names as block URLs.
    try:
        resolver_match = resolve(current_url)
        namespace = ''
        if resolver_match.namespaces:
            # More than one namespace? Only the first is used.
            namespace = resolver_match.namespaces[0]
        resolved_view_name = ':%s:%s' % (namespace, resolver_match.url_name)
    except Resolver404:
        resolved_view_name = None
    self._cache_init()
    cache_entry_name = cache_get_key(block_alias)
    siteblocks_static = self._cache_get(cache_entry_name)
    if not siteblocks_static:
        # Cache miss: rebuild the [guest, auth] lookup tables from the DB.
        blocks = Block.objects.filter(alias=block_alias, hidden=False).only('url', 'contents')
        siteblocks_static = [defaultdict(list), defaultdict(list)]
        for block in blocks:
            if block.url == '*':
                url_re = block.url
            elif block.url.startswith(':'):
                url_re = block.url
                # Normalize URL name to include a (possibly empty) namespace,
                # so it compares equal to resolved_view_name above.
                if url_re.count(':') == 1:
                    url_re = ':%s' % url_re
            else:
                url_re = re.compile(r'%s' % block.url)
            if block.access_guest:
                siteblocks_static[self.IDX_GUEST][url_re].append(block.contents)
            elif block.access_loggedin:
                siteblocks_static[self.IDX_AUTH][url_re].append(block.contents)
            else:
                # No access restriction: visible to both audiences.
                siteblocks_static[self.IDX_GUEST][url_re].append(block.contents)
                siteblocks_static[self.IDX_AUTH][url_re].append(block.contents)
        self._cache_set(cache_entry_name, siteblocks_static)
        self._cache_save()
    user = getattr(context['request'], 'user', None)
    is_authenticated = getattr(user, 'is_authenticated', False)
    if not DJANGO_2:
        # Before Django 1.10 `is_authenticated` was a method, not a property.
        is_authenticated = is_authenticated()
    if is_authenticated:
        lookup_area = siteblocks_static[self.IDX_AUTH]
    else:
        lookup_area = siteblocks_static[self.IDX_GUEST]
    static_block_contents = ''
    # Precedence: wildcard, then exact view-name match, then regex match.
    if '*' in lookup_area:
        static_block_contents = choice(lookup_area['*'])
    elif resolved_view_name in lookup_area:
        static_block_contents = choice(lookup_area[resolved_view_name])
    else:
        for url, contents in lookup_area.items():
            if url.match(current_url):
                static_block_contents = choice(contents)
                break
    return static_block_contents
def send_cons3rt_agent_logs(self):
    """Send a Slack message with an attachment for each cons3rt agent log.

    Reads every regular file in ``self.dep.cons3rt_agent_log_dir``, trims
    each to its last 7000 characters (presumably to keep attachments within
    Slack limits -- TODO confirm), attaches it, then sends one message.

    :return: None
    """
    log = logging.getLogger(self.cls_logger + '.send_cons3rt_agent_logs')
    log.debug('Searching for log files in directory: {d}'.format(d=self.dep.cons3rt_agent_log_dir))
    for item in os.listdir(self.dep.cons3rt_agent_log_dir):
        item_path = os.path.join(self.dep.cons3rt_agent_log_dir, item)
        if not os.path.isfile(item_path):
            continue
        log.debug('Adding slack attachment with cons3rt agent log file: {f}'.format(f=item_path))
        try:
            with open(item_path, 'r') as f:
                file_text = f.read()
        except (IOError, OSError) as e:
            # Fix: Logger.warn() is a deprecated alias; use warning().
            log.warning('There was a problem opening file: {f}\n{e}'.format(f=item_path, e=e))
            continue
        # Take the last 7000 characters
        file_text_trimmed = file_text[-7000:]
        attachment = SlackAttachment(fallback=file_text_trimmed, text=file_text_trimmed, color='#9400D3')
        self.add_attachment(attachment)
    self.send()
def calculate_convolution_output_shapes(operator):
    '''Infer the output shape of a CoreML convolution operator.

    Allowed input/output patterns are
        1. [N, C, H, W] ---> [N, C, H', W']
    '''
    check_input_and_output_numbers(operator, input_count_range=1, output_count_range=1)
    conv_params = operator.raw_operator.convolution
    in_shape = operator.inputs[0].type.shape
    # Start from a fresh 4-D shape; every entry is filled in below.
    operator.outputs[0].type.shape = [0, 0, 0, 0]
    out_shape = operator.outputs[0].type.shape
    out_shape[0] = in_shape[0]                  # N: batch passes through
    out_shape[1] = conv_params.outputChannels   # C: from the layer parameters
    # Defaults when the proto fields are empty: dilation 1, 3x3 kernel, stride 1.
    dilations = [conv_params.dilationFactor[0], conv_params.dilationFactor[1]] \
        if len(conv_params.dilationFactor) > 0 else [1, 1]
    kernel_shape = conv_params.kernelSize if len(conv_params.kernelSize) > 0 else [3, 3]
    strides = conv_params.stride if len(conv_params.stride) > 0 else [1, 1]
    # Explicit output size is only meaningful for convolution transpose.
    specified_output_shape = [0, 0]
    if conv_params.isDeconvolution and len(conv_params.outputShape) > 0:
        specified_output_shape = [int(i) for i in conv_params.outputShape]
    pad_mode = conv_params.WhichOneof('ConvolutionPaddingType')
    if pad_mode == 'valid' and len(conv_params.valid.paddingAmounts.borderAmounts) > 0:
        border = conv_params.valid.paddingAmounts.borderAmounts
        pad_heads = [border[0].startEdgeSize, border[1].startEdgeSize]
        pad_tails = [border[0].endEdgeSize, border[1].endEdgeSize]
    else:
        # Padding amounts are ignored for 'same' padding; 'valid' defaults to 0.
        pad_heads = [0, 0]
        pad_tails = [0, 0]
    # Adjust H- and W-axes.
    for axis in range(2):
        if conv_params.isDeconvolution:
            out_shape[axis + 2] = calculate_convolution_transpose_1D_output_shape(
                in_shape[axis + 2], kernel_shape[axis], dilations[axis], strides[axis],
                pad_mode, pad_heads[axis], pad_tails[axis], specified_output_shape[axis])
        else:
            out_shape[axis + 2] = calculate_convolution_and_pooling_1D_output_shape(
                in_shape[axis + 2], kernel_shape[axis], dilations[axis], strides[axis],
                pad_mode, pad_heads[axis], pad_tails[axis])
def get_lemma_by_id(self, mongo_id):
    '''Build a Lemma object from the database entry with the given ObjectId.

    Arguments:
    - `mongo_id`: a bson.objectid.ObjectId object
    '''
    # Serve from the cache first, when a cache is configured.
    if self._lemma_cache is not None:
        cached = self._lemma_cache.get(mongo_id)
        if cached is not None:
            return cached
    record = self._mongo_db.lexunits.find_one({'_id': mongo_id})
    if record is None:
        # No such entry: implicitly returns None, as the original did.
        return None
    lemma = Lemma(self, record)
    if self._lemma_cache is not None:
        self._lemma_cache.put(mongo_id, lemma)
    return lemma
def showInvLines(rh):
    """Produce help output related to command synopsis.

    Input:
       Request Handle
    """
    if rh.subfunction == '':
        return
    rh.printLn("N", "Usage:")
    for subcommand in ("diskpoolnames", "diskpoolspace <poolName>",
                       "fcpdevices", "general", "help", "version"):
        rh.printLn("N", " python " + rh.cmdName + " GetHost " + subcommand)
    return
def recursive_merge_dicts(x, y, misses="report", verbose=None):
    """Merge dictionary y into a deep copy of x.

    Values from y overwrite those in x, except when the existing value is a
    dict, in which case the merge recurses.  A key of y missing from x is
    also searched for inside x's nested dicts before being treated as missing.

    misses: what to do if a key in y is not found anywhere in x
        'insert'    -> set x[key] = value
        'exception' -> raise a ValueError
        'report'    -> print the name of the missing key
        'ignore'    -> do nothing
    verbose: chatter level; when None it is taken from y, then x, then 1.
    """
    def _merge(dst, src, misses="report", verbose=1):
        found = True
        for key, value in src.items():
            found = False
            if key in dst:
                found = True
                if isinstance(dst[key], dict):
                    if not isinstance(value, dict):
                        raise ValueError(
                            f"Attempted to overwrite dict {key} with non-dict: {value}")
                    _merge(dst[key], value, misses, verbose)
                else:
                    # Build the message before assigning, so it shows the old value.
                    if dst[key] == value:
                        msg = f"Reaffirming {key}={dst[key]}"
                    else:
                        msg = f"Overwriting {key}={dst[key]} to {key}={value}"
                    dst[key] = value
                    if verbose > 1 and key != "verbose":
                        print(msg)
            else:
                # Look for the key inside any nested dict of dst.
                for _, nested in dst.items():
                    if isinstance(nested, dict):
                        found = _merge(nested, {key: value}, misses="ignore", verbose=verbose)
                        if found:
                            break
                if not found:
                    msg = f'Could not find kwarg "{key}" in destination dict.'
                    if misses == "insert":
                        dst[key] = value
                        if verbose > 1:
                            print(f"Added {key}={value} from second dict to first")
                    elif misses == "exception":
                        raise ValueError(msg)
                    elif misses == "report":
                        print(msg)
        return found

    # If verbose is not provided, look for a value in y first, then x
    # (the 'verbose' kwarg is often inside one or both of them).
    if verbose is None:
        verbose = y.get("verbose", x.get("verbose", 1))
    merged = copy.deepcopy(x)
    _merge(merged, y, misses, verbose)
    return merged
def get_delete_url_link(self, text=None, cls=None, icon_class=None, **attrs):
    """Gets the html delete link for the object."""
    label = 'Delete' if text is None else text
    return build_link(href=self.get_delete_url(), text=label,
                      cls=cls, icon_class=icon_class, **attrs)
def signed_session(self, session=None):
    """Sign a requests session with the token.

    Called every time a request is going on the wire.  The user is
    responsible for keeping ``self.token`` up to date, either by overriding
    this method or by refreshing the token periodically (recommended).

    :param session: The session to configure for authentication
    :type session: requests.Session
    :rtype: requests.Session
    """
    # Create a fresh session only when none was supplied.
    session = session or requests.Session()
    session.headers['Authorization'] = f"Bearer {self.token}"
    return session
def check_type(stochastic):
    """type, shape = check_type(stochastic)

    Determine the type and shape of a stochastic's value.  The returned
    type is bool, int, float, or complex (non-native numpy dtypes are
    lumped into those categories), or the string 'object' for anything
    else.  The shape is () for scalars, or the array's shape otherwise.
    """
    groups = ((bool, bool_dtypes),
              (int, integer_dtypes),
              (float, float_dtypes),
              (complex, complex_dtypes))
    val = stochastic.value
    # Scalar (native or numpy scalar) values first.
    for py_type, dtypes in groups:
        if val.__class__ in dtypes:
            return py_type, ()
    if isinstance(val, ndarray):
        sctype = obj2sctype(val)
        for py_type, dtypes in groups:
            if sctype in dtypes:
                return py_type, val.shape
        return 'object', val.shape
    return 'object', ()
def to_array(self):
    """Serializes this EncryptedCredentials to a dictionary.

    :return: dictionary representation of this object.
    :rtype: dict
    """
    array = super(EncryptedCredentials, self).to_array()
    # u(): py2 gives unicode, py3 gives str.
    for field in ('data', 'hash', 'secret'):
        array[field] = u(getattr(self, field))
    return array
def replace_subject(rdf, fromuri, touri):
    """Replace occurrences of fromuri as subject with touri in given model.

    If touri is None, all occurrences of fromuri are deleted instead.
    If touri is a list or tuple of URIRefs, all values will be inserted.
    """
    if fromuri == touri:
        return
    # Fix: materialize the pairs before mutating the graph -- removing
    # triples while iterating the predicate_objects() generator can skip
    # entries or break iteration.
    pairs = list(rdf.predicate_objects(fromuri))
    # Normalize touri to a list once, instead of per iteration.
    if touri is not None and not isinstance(touri, (list, tuple)):
        touri = [touri]
    for p, o in pairs:
        rdf.remove((fromuri, p, o))
        if touri is not None:
            for uri in touri:
                rdf.add((uri, p, o))
def headinside(self):
    """The head inside the well.

    Returns
    -------
    array (length number of screens)
        Head inside the well for each screen
    """
    # Head just outside the well bore (at radius rw from the center).
    outside = self.model.head(self.xw + self.rw, self.yw, layers=self.layers)
    # Subtract resfac times the first parameter column -- presumably the
    # screen resistance times discharge; TODO confirm against the model.
    return outside - self.resfac * self.parameters[:, 0]
def version_dict(version):
    """Turn a version string into a dict with major/minor/... info."""
    match = version_re.match(str(version) or '')
    letters = ['alpha', 'pre']
    numbers = ['major', 'minor1', 'minor2', 'minor3', 'alpha_ver', 'pre_ver']
    if not match:
        # Unparseable version: every field is None.
        return {key: None for key in numbers + letters}
    d = match.groupdict()
    for key in letters:
        if not d[key]:
            d[key] = None
    for key in numbers:
        if d[key] == '*':
            d[key] = 99          # wildcard component
        elif d[key]:
            d[key] = int(d[key])
        else:
            d[key] = None
    return d
def parse_accept_header(accept):
    """Parse an HTTP Accept header.

    Parses *accept*, returning a list of triples
    ``(media_type, media_params, q_value)`` ordered by descending q value.
    Adapted from <https://djangosnippets.org/snippets/1042/>
    """
    parsed = []
    for media_range in accept.split(","):
        pieces = media_range.split(";")
        media_type = pieces.pop(0).strip()
        params = []
        q = 1.0
        for piece in pieces:
            key, value = piece.lstrip().split("=", 1)
            if key == "q":
                q = float(value)
            else:
                params.append((key, value))
        parsed.append((media_type, tuple(params), q))
    # Stable sort keeps header order among equal q values.
    parsed.sort(key=lambda triple: triple[2], reverse=True)
    return parsed
def find_nearest_color_hexstr(hexdigits, color_table=None, method='euclid'):
    '''Given a three or six-character hex digit string, return the nearest
    color index.

    Arguments:
        hexdigits: a three/six digit hex string, e.g. 'b0b', '123456'
    Returns:
        int, None: index, or None on error.
    '''
    try:
        if len(hexdigits) == 3:
            # Expand CSS-style shorthand: each digit doubles ('b' -> 0xbb),
            # i.e. value * 16 + value == value * 17.
            rgb = [int(digit, 16) * 17 for digit in hexdigits]
        elif len(hexdigits) == 6:
            rgb = [int(hexdigits[i:i + 2], 16) for i in (0, 2, 4)]
        else:
            raise ValueError('wrong length: %r' % hexdigits)
    except ValueError:
        return None
    return find_nearest_color_index(*rgb, color_table=color_table, method=method)
def connect(self, signal, **kwargs):
    """Connect a specific signal type to this receiver."""
    # Register with the signal, then record the connection for later teardown.
    signal.connect(self, **kwargs)
    self.connections += [(signal, kwargs)]
async def result(self, timeout: Optional[float] = None, *, pole_delay: float = 0.5) -> Any:
    """Get the result of the job, waiting for it if it's not yet available.

    If the job raised an exception, it is re-raised here.

    :param timeout: maximum time to wait for the job result before raising
        ``TimeoutError``; ``None`` waits forever
    :param pole_delay: how often to poll redis for the job result
    """
    async for elapsed in poll(pole_delay):
        info = await self.result_info()
        if info:
            outcome = info.result
            if info.success:
                return outcome
            # The stored result is the original exception instance.
            raise outcome
        if timeout is not None and elapsed > timeout:
            raise asyncio.TimeoutError()
def pCOH(self):
    """Partial coherence.

    .. math:: \\mathrm{pCOH}_{ij}(f) = \\frac{G_{ij}(f)}
              {\\sqrt{G_{ii}(f) G_{jj}(f)}}

    References
    ----------
    P. J. Franaszczuk, K. J. Blinowska, M. Kowalczyk. The application of
    parametric multichannel spectral estimates in the study of electrical
    brain activity. Biol. Cybernetics 51(4): 239-247, 1985.
    """
    G = self.G()
    # Outer product of the diagonals, G_ii * G_jj, for every trailing axis.
    # TODO: can we do that more efficiently?
    denominator = np.sqrt(np.einsum('ii..., jj... ->ij...', G, G))
    return G / denominator
def smallest_prime_factor(Q):
    """Find the smallest number >= Q whose only prime factors are 2, 3, 5 and 7.

    (The original docstring said "2, 3, 4, and 7"; the code divides by the
    primes 2, 3, 5 and 7.)

    :param Q: positive integer
    :return: smallest 7-smooth integer >= Q
    """
    A = Q
    while A != 1:
        # Fix: use integer `%` and `//` -- the original `np.mod` with true
        # division `/` silently turned A into a float.
        if A % 2 == 0:
            A //= 2
        elif A % 3 == 0:
            A //= 3
        elif A % 5 == 0:
            A //= 5
        elif A % 7 == 0:
            A //= 7
        else:
            # Q is not 7-smooth: restart the factor test on the next candidate.
            A = Q + 1
            Q = A
    return Q
def nn_setsockopt(socket, level, option, value):
    """set a socket option

    socket - socket number
    level - option level
    option - option
    value - a readable byte buffer (not a Unicode string) containing the value
    returns - 0 on success or < 0 on error
    """
    try:
        # Fast path: value is already a ctypes-compatible buffer.
        return _nn_setsockopt(socket, level, option,
                              ctypes.addressof(value), len(value))
    except (TypeError, AttributeError):
        # Fallback: copy plain bytes into a ctypes string buffer.  Note that
        # len(value) -- not the NUL-terminated buffer length -- is passed,
        # so the terminator is excluded from the option payload.
        copied = ctypes.create_string_buffer(value)
        return _nn_setsockopt(socket, level, option,
                              ctypes.addressof(copied), len(value))
def str(self, local=False, ifempty=None):
    """Returns the string representation of the datetime"""
    stamp = self.get(local)
    if stamp:
        return stamp.strftime('%Y-%m-%d %H:%M:%S')
    # No timestamp available: fall back to the caller-supplied default.
    return ifempty
def filter_db_names(paths: List[str]) -> List[str]:
    """Returns a filtered list of `paths`, where every name matches our format.

    Args:
        paths: A list of file names.
    """
    def _matches(db_path: str) -> bool:
        return VERSION_RE.match(os.path.basename(db_path)) is not None
    return list(filter(_matches, paths))
def get_lightcurve(self, star_id, return_1d=True):
    """Get the light curves for the given ID.

    Parameters
    ----------
    star_id : int
        A valid integer star id representing an object in the dataset.
    return_1d : boolean (default=True)
        Specify whether to return 1D arrays of (t, y, dy, filts) or
        2D arrays of (t, y, dy) where each column is a filter.

    Returns
    -------
    t, y, dy : np.ndarrays (if return_1d == False)
        Times, magnitudes, and magnitude errors.
        The shape of each array is [Nobs, 5], where the columns refer
        to [u, g, r, i, z] bands. Non-observations are indicated by NaN.
    t, y, dy, filts : np.ndarrays (if return_1d == True)
        Times, magnitudes, magnitude errors, and filters.
        The shape of each array is [Nobs], and non-observations are
        filtered out.

    Raises
    ------
    ValueError
        If no file for ``star_id`` exists in the archive.
    """
    filename = '{0}/{1}.dat'.format(self.dirname, star_id)
    try:
        # self.data is an archive (extractfile raises KeyError on a
        # missing member); the member is a whitespace-delimited table.
        data = np.loadtxt(self.data.extractfile(filename))
    except KeyError:
        raise ValueError("invalid star id: {0}".format(star_id))
    # Columns 0-1 appear to be sky coordinates (unused here); thereafter
    # the table interleaves (t, y, dy) triples, one per band.
    RA = data[:, 0]
    DEC = data[:, 1]
    t = data[:, 2::3]
    y = data[:, 3::3]
    dy = data[:, 4::3]
    # -99.99 is the sentinel for "no observation"; mask it with NaN.
    nans = (y == -99.99)
    t[nans] = np.nan
    y[nans] = np.nan
    dy[nans] = np.nan
    if return_1d:
        # Broadcast the band labels against the 2-D arrays, then drop
        # the masked (NaN) entries to get flat per-observation arrays.
        t, y, dy, filts = np.broadcast_arrays(t, y, dy, ['u', 'g', 'r', 'i', 'z'])
        good = ~np.isnan(t)
        return t[good], y[good], dy[good], filts[good]
    else:
        return t, y, dy
def sort_by_padding(instances: List[Instance], sorting_keys: List[Tuple[str, str]],  # pylint: disable=invalid-sequence-index
                    vocab: Vocabulary, padding_noise: float = 0.0) -> List[Instance]:
    """Sorts the instances by their padding lengths, using the keys in
    ``sorting_keys`` (in the order in which they are provided).
    ``sorting_keys`` is a list of ``(field_name, padding_key)`` tuples.
    """
    keyed_instances = []
    for instance in instances:
        # The instance must be indexed before padding lengths are meaningful.
        instance.index_fields(vocab)
        lengths = cast(Dict[str, Dict[str, float]], instance.get_padding_lengths())
        if padding_noise > 0.0:
            # Jitter the lengths so equal-length instances don't always
            # batch together.
            lengths = {field_name: add_noise_to_dict_values(field_lengths, padding_noise)
                       for field_name, field_lengths in lengths.items()}
        sort_key = [lengths[field_name][padding_key]
                    for field_name, padding_key in sorting_keys]
        keyed_instances.append((sort_key, instance))
    keyed_instances.sort(key=lambda pair: pair[0])
    return [instance for _, instance in keyed_instances]
def gen_lower(x: Iterable[str]) -> Generator[str, None, None]:
    """Args:
        x: iterable of strings
    Yields:
        each string in lower case
    """
    yield from (string.lower() for string in x)
def onAuthenticate(self, signature, extra):
    """Callback fired when a client responds to an authentication challenge."""
    log.msg("onAuthenticate: {} {}".format(signature, extra))
    # Guard clauses: deny when nothing is pending or the signature is wrong.
    if not self._pending_auth:
        return types.Deny(message=u"no pending authentication")
    pending = self._pending_auth
    if signature != pending.signature:
        return types.Deny(message=u"signature is invalid")
    # Signature matches the pending challenge: accept the client.
    return types.Accept(authid=pending.uid,
                        authrole=pending.authrole,
                        authmethod=pending.authmethod,
                        authprovider=pending.authprovider)
def p_bexpr_func(p):
    """bexpr : ID bexpr"""
    # NOTE: the docstring above is the PLY grammar production -- do not edit.
    # Treat `ID bexpr` as a call of ID with the boolean expression as its
    # single argument.
    args = make_arg_list(make_argument(p[2], p.lineno(2)))
    p[0] = make_call(p[1], p.lineno(1), args)
    if p[0] is None:
        return
    if p[0].token in ('STRSLICE', 'VAR', 'STRING'):
        # The "call" resolved to a string slice / plain variable access,
        # not a function call proper: just mark the symbol as accessed.
        entry = SYMBOL_TABLE.access_call(p[1], p.lineno(1))
        entry.accessed = True
        return
    # TODO: Check that arrays really need kind=function to be set.
    # Both array accesses and functions are tagged as functions;
    # functions also have the class_ attribute set to 'function'.
    p[0].entry.set_kind(KIND.function, p.lineno(1))
    p[0].entry.accessed = True
def send_msg(self, chat_id, msg_type, **kwargs):
    """Deprecated alias for `send`; use `send` instead."""
    return self.send(chat_id, msg_type, **kwargs)
def is_token_revoked(decoded_token):
    """Checks if the given token is revoked or not.

    Every token we create is stored in the database, so a token whose jti
    is unknown is treated as revoked -- we don't know where it came from.
    """
    jti = decoded_token['jti']
    try:
        return TokenBlacklist.query.filter_by(jti=jti).one().revoked
    except NoResultFound:
        # Unknown token: consider it revoked.
        return True
def add_member(self, member):
    """Add a single member to the scope.

    You may only edit the list of members if the pykechain credentials
    allow this.

    :param member: single username to be added to the scope list of members
    :type member: basestring
    :raises APIError: when unable to update the scope member
    """
    self._update_scope_project_team(select_action='add_member',
                                    user=member,
                                    user_type='member')
def _copy_chunk ( src , dst , length ) :
"Copy length bytes from file src to file dst ." | BUFSIZE = 128 * 1024
while length > 0 :
l = min ( BUFSIZE , length )
buf = src . read ( l )
assert len ( buf ) == l
dst . write ( buf )
length -= l |
def _parse_metadata(cls, line, meta, parse_remarks=True):
    """Parse a metadata line.

    The metadata is organized as a ``key: value`` statement which is split
    into the proper key and the proper value.

    Arguments:
        line (str): the line containing the metadata
        parse_remarks (bool, optional): set to `False` to avoid parsing
            the remarks.

    Note:
        If the line follows the schema ``remark: key: value``, the function
        will attempt to extract the proper key/value instead of leaving
        everything inside the remark key.  This may misfire when the remark
        is simply a sentence containing a colon (e.g.
        ``remark: 090506 "Attribute" in Term deleted and new entries: Scan
        Type [...]`` found in imagingMS.obo).  To prevent that, the text
        left of the colon must be less than *20 chars long*.
    """
    key, value = line.split(':', 1)
    key, value = key.strip(), value.strip()
    # NOTE(review): the original source's indentation was lost in this dump;
    # the `else` below is reconstructed as belonging to the outer `if`,
    # meaning a remark whose first colon is >= 20 chars in falls through
    # without being stored -- confirm against the upstream project.
    if parse_remarks and "remark" in key:       # Checking that the ':' is
        if 0 < value.find(': ') < 20:           # not too far, to avoid parsing
            try:                                # a sentence containing a ':'
                # obo statement nested in a remark
                cls._parse_metadata(value, meta, parse_remarks)
            except ValueError:                  # (20 is arbitrary, it may
                pass                            # require tweaking)
    else:
        meta[key].append(value)
    # Re-normalize synonymtypedef entries: replace raw obo strings with
    # SynonymType objects (try/else so a missing key leaves meta untouched).
    try:
        syn_type_def = []
        for m in meta['synonymtypedef']:
            if not isinstance(m, SynonymType):
                x = SynonymType.from_obo(m)
                syn_type_def.append(x)
            else:
                syn_type_def.append(m)
    except KeyError:
        pass
    else:
        meta['synonymtypedef'] = syn_type_def
def send(self):
    """Entrypoint to send data to Zabbix.

    If debug is enabled, items are sent one by one; otherwise they are
    sent in bulk (up to ZBX_TRAPPER_MAX_VALUE per run).

    Returns a tuple of aggregate counters:
    (server_success, server_failure, processed, failed, total, time).
    """
    if self.logger:  # pragma: no cover
        self.logger.info("Starting to send %d items" % len(self._items_list))
    try:
        # Zabbix trapper accepts a maximum of 250 items in bulk; we respect
        # that in case of enforcement on the Zabbix server side.
        # Special case if debug is enabled: send items one by one.
        max_value = ZBX_TRAPPER_MAX_VALUE
        if self.debug_level >= 4:
            max_value = 1
            if self.logger:  # pragma: no cover
                self.logger.debug("Bulk limit is %d items" % max_value)
        else:
            if self.logger:  # pragma: no cover
                self.logger.info("Bulk limit is %d items" % max_value)
        # Initialize offsets & counters.
        max_offset = len(self._items_list)
        run = 0
        start_offset = 0
        stop_offset = min(start_offset + max_value, max_offset)
        # `time` here is a plain accumulator of per-run durations,
        # not the stdlib module.
        server_success = server_failure = processed = failed = total = time = 0
        while start_offset < stop_offset:
            run += 1
            if self.logger:  # pragma: no cover
                self.logger.debug('run %d: start_offset is %d, stop_offset is %d'
                                  % (run, start_offset, stop_offset))
            # Extract the slice of items to send from the global item list.
            _items_to_send = self.items_list[start_offset:stop_offset]
            # Send extracted items.
            run_response, run_processed, run_failed, run_total, run_time = \
                self._send_common(_items_to_send)
            # Update aggregate counters with this run's results.
            if run_response == 'success':
                server_success += 1
            elif run_response == 'failed':
                server_failure += 1
            processed += run_processed
            failed += run_failed
            total += run_total
            time += run_time
            if self.logger:  # pragma: no cover
                self.logger.info("%d items sent during run %d" % (run_total, run))
                self.logger.debug('run %d: processed is %d, failed is %d, total is %d'
                                  % (run, run_processed, run_failed, run_total))
            # Compute next run's offsets.
            start_offset = stop_offset
            stop_offset = min(start_offset + max_value, max_offset)
            # Reset socket, which is likely to be closed by the server.
            self._socket_reset()
    except:
        # Deliberate bare except: clean up state, then re-raise unchanged.
        self._reset()
        self._socket_reset()
        raise
    if self.logger:  # pragma: no cover
        self.logger.info('All %d items have been sent in %d runs' % (total, run))
        self.logger.debug('Total run is %d; item processed: %d, failed: %d, total: %d, during %f seconds'
                          % (run, processed, failed, total, time))
    # Everything has been sent: reset the DataContainer and return results.
    self._reset()
    return server_success, server_failure, processed, failed, total, time
def add_grammar(self, customization_id, grammar_name, grammar_file, content_type, allow_overwrite=None, **kwargs):
    """Add a grammar.

    Adds a single grammar file to a custom language model.  Submit a plain
    text file in UTF-8 format that defines the grammar; use multiple
    requests to submit multiple grammar files.  You must use credentials
    for the instance of the service that owns the model.  Adding a grammar
    does not affect the model until you train it with the **Train a custom
    language model** method.

    The call returns HTTP 201 if the grammar is valid.  The service then
    asynchronously processes the grammar and extracts any out-of-vocabulary
    (OOV) words it finds; you cannot add further resources or train the
    model until that analysis completes (check with **Get a grammar**).
    OOV words are added to the model's words resource and can be inspected
    and corrected with the words-related methods.

    The service limits a custom model to 10 million total words from all
    sources combined and no more than 30 thousand OOV words, including
    words extracted from corpora and grammars and words added directly.

    **See also:**
    * [Working with grammars](https://cloud.ibm.com/docs/services/speech-to-text/)
    * [Add grammars to the custom language model](https://cloud.ibm.com/docs/services/speech-to-text/)

    :param str customization_id: The customization ID (GUID) of the custom
        language model. You must use credentials for the service instance
        that owns the model.
    :param str grammar_name: The name of the new grammar. Use a localized
        name of at most 128 characters that matches the model's language;
        no spaces, slashes or backslashes; must not collide with an
        existing grammar or corpus name, and must not be ``user`` (reserved).
    :param str grammar_file: A plain text file containing the grammar in
        the format given by the ``Content-Type`` header, encoded in UTF-8.
        Any encoding declared inside the file is ignored.
    :param str content_type: The format (MIME type) of the grammar file:
        ``application/srgs`` (ABNF plain text) or ``application/srgs+xml``
        (XML Form).
    :param bool allow_overwrite: If ``true``, overwrite an existing grammar
        of the same name; if ``false``, the request fails when the name
        already exists. No effect when the name is new.
    :param dict headers: A `dict` containing the request headers
    :return: A `DetailedResponse` containing the result, headers and HTTP status code.
    :rtype: DetailedResponse
    """
    # Fail fast on the required arguments.
    if customization_id is None:
        raise ValueError('customization_id must be provided')
    if grammar_name is None:
        raise ValueError('grammar_name must be provided')
    if grammar_file is None:
        raise ValueError('grammar_file must be provided')
    if content_type is None:
        raise ValueError('content_type must be provided')
    headers = {'Content-Type': content_type}
    # Caller-supplied headers override/extend the defaults.
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    sdk_headers = get_sdk_headers('speech_to_text', 'V1', 'add_grammar')
    headers.update(sdk_headers)
    params = {'allow_overwrite': allow_overwrite}
    data = grammar_file
    # Path variables are URL-encoded before interpolation.
    url = '/v1/customizations/{0}/grammars/{1}'.format(*self._encode_path_vars(customization_id, grammar_name))
    response = self.request(method='POST', url=url, headers=headers, params=params, data=data, accept_json=True)
    return response
def _split_lines(self, original_lines: List[str]) -> List[str]:
    """Splits the original lines list according to the current console width and group indentations.

    ANSI color escape sequences are detected and kept, but excluded from the
    visible width budget so colored text wraps at the same column as plain text.
    Mutates ``self._last_position`` and ``self._is_first_line`` as wrapping
    progresses.

    :param original_lines: The original lines list to split.
    :return: A list of the new width-formatted lines.
    """
    console_width = get_console_width()
    # We take indent into account only in the inner group lines.
    max_line_length = console_width - len(self.LINE_SEP) - self._last_position - (self.indents_sum if not self._is_first_line else self.indents_sum - self._indents[-1])
    lines = []
    for i, line in enumerate(original_lines):
        fixed_line = []
        colors_counter = 0
        line_index = 0
        while line_index < len(line):
            c = line[line_index]
            # Check if we're in a color block.
            if self._colors and c == self._ANSI_COLOR_PREFIX and len(line) >= (line_index + self._ANSI_COLOR_LENGTH):
                current_color = line[line_index:line_index + self._ANSI_COLOR_LENGTH]
                # If it really is a color, skip it (copy it through, but do not
                # count it toward the visible line length).
                if self._ANSI_REGEXP.match(current_color):
                    line_index += self._ANSI_COLOR_LENGTH
                    fixed_line.extend(list(current_color))
                    colors_counter += 1
                    continue
            fixed_line.append(line[line_index])
            line_index += 1
            # Create a new line, if max line is reached.
            # Each color adds _ANSI_COLOR_LENGTH invisible chars to fixed_line.
            if len(fixed_line) >= max_line_length + (colors_counter * self._ANSI_COLOR_LENGTH):
                # Special case in which we want to split right before the line break.
                if len(line) > line_index and line[line_index] == self.LINE_SEP:
                    continue
                line_string = ''.join(fixed_line)
                if not line_string.endswith(self.LINE_SEP):
                    line_string += self.LINE_SEP
                lines.append(line_string)
                fixed_line = []
                colors_counter = 0
                self._last_position = 0
                # Max line length has changed since the last position is now 0.
                max_line_length = console_width - len(self.LINE_SEP) - self.indents_sum
                self._is_first_line = False
        # Flush whatever remains of this original line.
        if len(fixed_line) > 0:
            fixed_line = ''.join(fixed_line)
            # If this line contains only color codes, attach it to the last line instead of creating a new one.
            if len(fixed_line) == self._ANSI_COLOR_LENGTH and self._ANSI_REGEXP.match(fixed_line) is not None and len(lines) > 0:
                lines[-1] = lines[-1][:-1] + fixed_line
            else:
                lines.append(fixed_line)
    return lines
def structure_transform(self, original_structure, new_structure, refine_rotation=True):
    """Transforms a tensor from one basis for an original structure
    into a new basis defined by a new structure.

    Args:
        original_structure (Structure): structure corresponding
            to the basis of the current tensor
        new_structure (Structure): structure corresponding to the
            desired basis
        refine_rotation (bool): whether to refine the rotations
            generated in get_ieee_rotation

    Returns:
        Tensor that has been transformed such that its basis
        corresponds to the new_structure's basis
    """
    matcher = StructureMatcher()
    if not matcher.fit(original_structure, new_structure):
        warnings.warn("original and new structures do not match!")
    rotation_original = self.get_ieee_rotation(original_structure, refine_rotation)
    rotation_new = self.get_ieee_rotation(new_structure, refine_rotation)
    # Rotate into the IEEE frame of the original structure, then undo the
    # IEEE rotation of the new structure to land in its basis.
    ieee_tensor = self.rotate(rotation_original)
    return ieee_tensor.rotate(np.transpose(rotation_new))
def replace_masked_values(tensor: torch.Tensor, mask: torch.Tensor, replace_with: float) -> torch.Tensor:
    """Replaces all masked values in ``tensor`` with ``replace_with``. ``mask`` must be broadcastable
    to the same shape as ``tensor``. We require that ``tensor.dim() == mask.dim()``, as otherwise we
    won't know which dimensions of the mask to unsqueeze.

    This just does ``tensor.masked_fill()``, except the pytorch method fills in things with a mask
    value of 1, where we want the opposite: positions where ``mask`` is 0 are replaced.

    :param tensor: The tensor whose masked-out entries should be replaced.
    :param mask: Same-rank mask; nonzero/True entries are kept, zero/False replaced.
    :param replace_with: The value written into masked-out positions.
    :raises ConfigurationError: If ``tensor`` and ``mask`` differ in rank.
    """
    if tensor.dim() != mask.dim():
        raise ConfigurationError("tensor.dim() (%d) != mask.dim() (%d)" % (tensor.dim(), mask.dim()))
    # ``(1 - mask).byte()`` fails on modern PyTorch when the mask is a bool
    # tensor (subtraction on bool is disallowed) and byte masks are deprecated.
    # Inverting via ``~mask.bool()`` handles both integer and boolean masks.
    return tensor.masked_fill(~mask.bool(), replace_with)
def program(self, *, vertex_shader, fragment_shader=None, geometry_shader=None, tess_control_shader=None, tess_evaluation_shader=None, varyings=()) -> 'Program':
    '''Create a :py:class:`Program` object.

    Only linked programs will be returned.

    The varyings are only used when a transform program is created.
    A single varying name (str) is also accepted for ``varyings``.

    Args:
        vertex_shader (str): Vertex shader source.
        fragment_shader (str): Fragment shader source.
        geometry_shader (str): Geometry shader source.
        tess_control_shader (str): Tessellation control shader source.
        tess_evaluation_shader (str): Tessellation evaluation shader source.
        varyings (list): A list of varying names.

    Returns:
        :py:class:`Program` object
    '''
    # Accept a bare string as a one-element varyings sequence.
    if type(varyings) is str:
        varyings = (varyings,)
    varyings = tuple(varyings)
    # Bypass __init__; all program state comes from the low-level mglo call.
    res = Program.__new__(Program)
    res.mglo, ls1, ls2, ls3, ls4, ls5, res._subroutines, res._geom, res._glo = self.mglo.program(vertex_shader, fragment_shader, geometry_shader, tess_control_shader, tess_evaluation_shader, varyings)
    # ls1..ls5 describe, in order: attributes, varyings, uniforms,
    # uniform blocks and subroutines of the linked program.
    members = {}
    for item in ls1:
        obj = Attribute.__new__(Attribute)
        obj.mglo, obj._location, obj._array_length, obj._dimension, obj._shape, obj._name = item
        members[obj.name] = obj
    for item in ls2:
        obj = Varying.__new__(Varying)
        obj._number, obj._array_length, obj._dimension, obj._name = item
        members[obj.name] = obj
    for item in ls3:
        obj = Uniform.__new__(Uniform)
        obj.mglo, obj._location, obj._array_length, obj._dimension, obj._name = item
        members[obj.name] = obj
    for item in ls4:
        obj = UniformBlock.__new__(UniformBlock)
        obj.mglo, obj._index, obj._size, obj._name = item
        members[obj.name] = obj
    for item in ls5:
        obj = Subroutine.__new__(Subroutine)
        obj._index, obj._name = item
        members[obj.name] = obj
    res._members = members
    res.ctx = self
    res.extra = None
    return res
def snapshots_create(container, name=None, remote_addr=None, cert=None, key=None, verify_cert=True):
    '''Create a snapshot for a container

    container :
        The name of the container to get.

    name :
        The name of the snapshot. Defaults to a ``%Y%m%d%H%M%S`` timestamp.

    remote_addr :
        An URL to a remote server. The 'cert' and 'key' fields must also be
        provided if 'remote_addr' is defined.

        Examples:
            https://myserver.lan:8443
            /var/lib/mysocket.sock

    cert :
        PEM Formatted SSL Certificate.

        Examples:
            ~/.config/lxc/client.crt

    key :
        PEM Formatted SSL Key.

        Examples:
            ~/.config/lxc/client.key

    verify_cert : True
        Verify the ssl certificate. Default: True

    CLI Examples:

    .. code-block:: bash

        $ salt '*' lxd.snapshots_create test-container test-snapshot
    '''
    cont = container_get(container, remote_addr, cert, key, verify_cert, _raw=True)
    # Fall back to a timestamp-based snapshot name.
    snapshot_name = name or datetime.now().strftime('%Y%m%d%H%M%S')
    cont.snapshots.create(snapshot_name)
    # Confirm the snapshot actually exists before reporting success.
    for snapshot in snapshots_all(container).get(container):
        if snapshot.get('name') == snapshot_name:
            return {'name': snapshot_name}
    return {'name': False}
def convert_pro_to_hyp(pro):
    """Converts a pro residue to a hydroxypro residue.

    All metadata associated with the original pro will be lost i.e. tags.
    As a consequence, it is advisable to relabel all atoms in the structure
    in order to make them contiguous.

    Parameters
    ----------
    pro : ampal.Residue
        The proline residue to be mutated to hydroxyproline.

    Examples
    --------
    We can create a collagen model using isambard and convert every third
    residue to hydroxyproline:

    >>> import isambard
    >>> col = isambard.specifications.CoiledCoil.tropocollagen(aa=21)
    >>> col.pack_new_sequences(['GPPGPPGPPGPPGPPGPPGPP'] * 3)
    >>> to_convert = [
    ...     res for (i, res) in enumerate(col.get_monomers())
    ...     if not (i + 1) % 3]
    >>> for pro in to_convert:
    ...     isambard.ampal.non_canonical.convert_pro_to_hyp(pro)
    >>> col.sequences
    ['GPXGPXGPXGPXGPXGPXGPX', 'GPXGPXGPXGPXGPXGPXGPX', 'GPXGPXGPXGPXGPXGPXGPX']
    """
    # Load the hydroxyproline reference residue and align it onto `pro`.
    with open(str(REF_PATH / 'hydroxyproline_ref_1bkv_0_6.pickle'), 'rb') as ref_file:
        hyp_ref = pickle.load(ref_file)
    align_nab(hyp_ref, pro)
    # Discard the proline side chain plus every hydrogen atom.
    doomed_labels = ['CB', 'CG', 'CD']
    doomed_labels.extend(label for label, atom in pro.atoms.items() if atom.element == 'H')
    for label in doomed_labels:
        del pro.atoms[label]
    # Adopt the aligned reference atoms that proline does not already have.
    for label, ref_atom in hyp_ref.atoms.items():
        if label not in pro.atoms:
            pro.atoms[label] = ref_atom
    # Relabel the residue as hydroxyproline and reset its metadata.
    pro.mol_code = 'HYP'
    pro.mol_letter = 'X'
    pro.is_hetero = True
    pro.tags = {}
    pro.states = {'A': pro.atoms}
    pro.active_state = 'A'
    for atom in pro.get_atoms():
        atom.ampal_parent = pro
        atom.tags = {'bfactor': 1.0, 'charge': ' ', 'occupancy': 1.0, 'state': 'A'}
def prepend(self, key, val, time=0, min_compress_len=0):
    '''Prepend the value to the beginning of the existing key's value.

    Only stores in memcache if key already exists.
    Also see L{append}.

    @return: Nonzero on success.
    @rtype: int
    '''
    # Delegate to the generic storage helper using the "prepend" command.
    return self._set("prepend", key, val, time, min_compress_len)
def groups(self, user, include=None):
    """Retrieve the groups for this user.

    :param user: User object or id
    :param include: list of objects to sideload. `Side-loading API Docs
        <https://developer.zendesk.com/rest_api/docs/core/side_loading>`__.
    """
    # Delegate to the generic Zendesk query helper on the groups endpoint.
    return self._query_zendesk(self.endpoint.groups, 'group', id=user, include=include)
def qhashform(o):
    '''qhashform(o) yields a version of o, if possible, that yields a hash that can be reproduced
    across instances. This correctly handles quantities and numpy arrays, among other things.'''
    if is_quantity(o):
        # Encode a quantity as (tag, hashable magnitude, unit string).
        return ('__#quant', qhashform(mag(o)), str(o.u))
    elif isinstance(o, np.ndarray) and np.issubdtype(o.dtype, np.dtype(np.number).type):
        # Numeric arrays hash by their raw buffer contents.
        return ('__#ndarray', o.tobytes())
    elif isinstance(o, (set, frozenset)):
        # NOTE(review): set iteration order is arbitrary, so this tuple (and
        # hence the resulting hash) may differ across runs/instances despite
        # the docstring's reproducibility claim — confirm whether a canonical
        # ordering is needed here.
        return ('__#set', tuple([qhashform(x) for x in o]))
    elif is_map(o):
        # Persistent (hashable) map with both keys and values normalized.
        return ps.pmap({qhashform(k): qhashform(v) for (k, v) in six.iteritems(o)})
    elif is_str(o):
        return o
    elif hasattr(o, '__iter__'):
        # Any other iterable becomes a tuple of normalized elements.
        return tuple([qhashform(u) for u in o])
    else:
        return o
def _update_params_on_kvstore_nccl ( param_arrays , grad_arrays , kvstore , param_names ) :
"""Perform update of param _ arrays from grad _ arrays on NCCL kvstore .""" | valid_indices = [ index for index , grad_list in enumerate ( grad_arrays ) if grad_list [ 0 ] is not None ]
valid_grad_arrays = [ grad_arrays [ i ] for i in valid_indices ]
valid_param_arrays = [ param_arrays [ i ] for i in valid_indices ]
valid_param_names = [ param_names [ i ] for i in valid_indices ]
size = len ( valid_grad_arrays )
start = 0
# Use aggregation by default only with NCCL
default_batch = '16'
batch = int ( os . getenv ( 'MXNET_UPDATE_AGGREGATION_SIZE' , default_batch ) )
while start < size :
end = start + batch if start + batch < size else size
# push gradient , priority is negative index
kvstore . push ( valid_param_names [ start : end ] , valid_grad_arrays [ start : end ] , priority = - start )
# pull back the weights
kvstore . pull ( valid_param_names [ start : end ] , valid_param_arrays [ start : end ] , priority = - start )
start = end |
def get_path(self, name, default=None):
    """Retrieves an environment variable as a filesystem path.

    Requires the `pathlib`_ library if using Python <= 3.4.

    Args:
        name (str): The case-insensitive, unprefixed variable name.
        default: If provided, a default value will be returned
            instead of throwing ``EnvironmentError``.

    Returns:
        pathlib.Path: The environment variable as a ``pathlib.Path``
        object.

    Raises:
        EnvironmentError: If the environment variable does not
            exist, and ``default`` was not provided.

    .. _pathlib:
        https://pypi.python.org/pypi/pathlib/
    """
    if name in self:
        return pathlib.Path(self[name])
    # Variable is missing: fall back to the default if one was given.
    if default is not None:
        return default
    raise EnvironmentError.not_found(self._prefix, name)
def scaled_euclid_dist(self, X, X2):  # pragma: no cover
    """Returns |(X - X2T) / lengthscales| (L2-norm).

    Deprecated: emits a DeprecationWarning on every call.
    """
    deprecation_message = ('scaled_euclid_dist is deprecated and will be removed '
                          'in GPflow version 1.4.0. For stationary kernels, '
                          'define K_r(r) instead.')
    warnings.warn(deprecation_message, DeprecationWarning)
    # Square root of the scaled squared distance, clipped for numerical safety.
    squared_dist = self.scaled_square_dist(X, X2)
    return self._clipped_sqrt(squared_dist)
def network_del_notif(self, tenant_id, tenant_name, net_id):
    """Network delete notification.

    Forwarded to ``network_delete_notif`` only once the firewall
    subsystem has been initialized; otherwise the event is ignored.
    """
    if self.fw_init:
        self.network_delete_notif(tenant_id, tenant_name, net_id)
def getctime(self, path=None, client_kwargs=None, header=None):
    """Return the creation time of path.

    Args:
        path (str): File path or URL.
        client_kwargs (dict): Client arguments.
        header (dict): Object header.

    Returns:
        float: The number of seconds since the epoch
        (see the time module).
    """
    # Resolve the object header first, then extract the creation time from it.
    resolved_header = self.head(path, client_kwargs, header)
    return self._getctime_from_header(resolved_header)
def do_child_count(self, params):
    """\x1b[1mNAME\x1b[0m
            child_count - Prints the child count for paths

    \x1b[1mSYNOPSIS\x1b[0m
            child_count [path] [depth]

    \x1b[1mOPTIONS\x1b[0m
            * path: the path (default: cwd)
            * max_depth: max recursion limit (0 is no limit) (default: 1)

    \x1b[1mEXAMPLES\x1b[0m
            > child-count /
            /zookeeper: 2
            /foo: 0
            /bar: 3
    """
    # Walk the subtree up to params.depth; full_path=True so child_count
    # receives absolute paths. 'level' is unused here.
    # NOTE(review): show_output is given a format string plus args —
    # presumably it applies the formatting itself; confirm its signature.
    for child, level in self._zk.tree(params.path, params.depth, full_path=True):
        self.show_output("%s: %d", child, self._zk.child_count(child))
def frontage(self):
    """Length in Feet (f).

    Derived as area / length, rounded to the nearest integer.
    Returns None when either area or length is missing/zero.
    """
    if not (self.length and self.area):
        return None
    return round(self.area / self.length)
def fix_ar_analyses_inconsistencies(portal):
    """Fixes inconsistencies between analyses and the ARs they belong to when
    the AR is in a "cancelled", "invalidated" or "rejected" state.

    :param portal: the portal object whose transaction is committed at the end.
    """
    def fix_analyses(request, status):
        # Force every still-active analysis of `request` into `status`.
        wf_id = "bika_analysis_workflow"
        workflow = api.get_tool("portal_workflow").getWorkflowById(wf_id)
        # Only analyses in these states are considered inconsistent.
        review_states = ['assigned', 'unassigned', 'to_be_verified']
        query = dict(portal_type="Analysis", getRequestUID=api.get_uid(request), review_state=review_states)
        for brain in api.search(query, CATALOG_ANALYSIS_LISTING):
            analysis = api.get_object(brain)
            # If the analysis is assigned to a worksheet, unassign first
            ws = analysis.getWorksheet()
            if ws:
                remove_analysis_from_worksheet(analysis)
                reindex_request(analysis)
            # Force the new state
            changeWorkflowState(analysis, wf_id, status)
            workflow.updateRoleMappingsFor(analysis)
            analysis.reindexObject(idxs=["review_state", "is_active"])
    def fix_ar_analyses(status, wf_state_id="review_state"):
        # Fix the analyses of every AR currently in `status`.
        brains = api.search({wf_state_id: status}, CATALOG_ANALYSIS_REQUEST_LISTING)
        total = len(brains)
        for num, brain in enumerate(brains):
            if num % 100 == 0:
                logger.info("Fixing inconsistent analyses from {} ARs: {}/{}".format(status, num, total))
            fix_analyses(brain, status)
    logger.info("Fixing Analysis Request - Analyses inconsistencies ...")
    # Queue reindex/action handling so it runs once at the end, not per object.
    pool = ActionHandlerPool.get_instance()
    pool.queue_pool()
    fix_ar_analyses("cancelled")
    fix_ar_analyses("invalid")
    fix_ar_analyses("rejected")
    pool.resume()
    commit_transaction(portal)
def is_full(self):
    """Return whether the activity is full."""
    capacity = self.get_true_capacity()
    # -1 is the sentinel for "unlimited capacity": never full.
    if capacity == -1:
        return False
    return self.eighthsignup_set.count() >= capacity
def rank_remove(self, rank, items, cutoff):
    """Remove tokens or stems (specified in items) based on rank's (df or
    tfidf) value being less than cutoff.

    To remove all words with rank R or less, specify
    cutoff=self.xxx_ranking[R][1].
    """
    # Resolve which ranking drives the removal.
    if rank == "df":
        ranking = self.df_ranking
    elif rank == "tfidf":
        ranking = self.tfidf_ranking
    else:
        raise ValueError("Rank must be either 'df' or 'tfidf'.")
    # Terms at or below the cutoff score are dropped everywhere.
    to_remove = {term for term, score in ranking if score <= cutoff}
    def prune(tokens):
        return [t for t in tokens if t not in to_remove]
    if items == 'tokens':
        self.tokens = list(map(prune, self.tokens))
    elif items == 'bigrams':
        self.bigrams = list(map(prune, self.bigrams))
    elif items == 'stems':
        self.stems = list(map(prune, self.stems))
    else:
        raise ValueError("Items must be either 'tokens', 'bigrams' or 'stems'.")
def make_full_path(basedir, outkey, origname):
    """Make a full file path by combining tokens.

    Parameters
    ----------
    basedir : str
        The top level output area
    outkey : str
        The key for the particular instance of the analysis
    origname : str
        Template for the output file name

    Returns
    -------
    outpath : str
        This will be <basedir>:<outkey>:<newname>.fits
        where newname = origname.replace('.fits', '_<outkey>.fits')
    """
    # Only the base name of the template is kept; its directory is discarded.
    new_name = os.path.basename(origname).replace('.fits', '_%s.fits' % outkey)
    return os.path.join(basedir, outkey, new_name)
def get_ancestor_tag_names(mention):
    """Return the HTML tag of the Mention's ancestors.

    For example, ['html', 'body', 'p'].
    If a candidate is passed in, only the ancestors of its first Mention are returned.

    :param mention: The Mention to evaluate
    :rtype: list of strings
    """
    span = _to_span(mention)
    # Collect tags walking up from the sentence's node, then flip to get
    # the outermost-first ordering.
    names = []
    node = _get_node(span.sentence)
    while node is not None:
        names.append(str(node.tag))
        node = node.getparent()
    names.reverse()
    return names
def reload_class(self, verbose=True, reload_module=True):
    """Special class reloading function.

    This function is often injected as rrr of classes. It reloads the modules
    of the instance's class and all its base classes (via their ``rrr`` hook
    or ``imp.reload``), then rebinds the instance's methods to the freshly
    loaded class objects.

    :param verbose: print progress; values > 1 print extra detail.
    :param reload_module: actually reload modules (otherwise only rebind).
    """
    import utool as ut
    verbose = verbose or VERBOSE_CLASS
    classname = self.__class__.__name__
    try:
        modname = self.__class__.__module__
        if verbose:
            print('[class] reloading ' + classname + ' from ' + modname)
        # -- HACK --
        # Give the instance a chance to tear down state before reloading.
        if hasattr(self, '_on_reload'):
            if verbose > 1:
                print('[class] calling _on_reload for ' + classname)
            self._on_reload()
        elif verbose > 1:
            print('[class] ' + classname + ' does not have an _on_reload function')
        # Do for all inheriting classes
        def find_base_clases(_class, find_base_clases=None):
            # Recursively collect bases (excluding object), parents first.
            class_list = []
            for _baseclass in _class.__bases__:
                parents = find_base_clases(_baseclass, find_base_clases)
                class_list.extend(parents)
            if _class is not object:
                class_list.append(_class)
            return class_list
        head_class = self.__class__
        # Determine if parents need reloading
        class_list = find_base_clases(head_class, find_base_clases)
        # HACK
        ignore = {HashComparable2}
        class_list = [_class for _class in class_list if _class not in ignore]
        for _class in class_list:
            if verbose:
                print('[class] reloading parent ' + _class.__name__ + ' from ' + _class.__module__)
            if _class.__module__ == '__main__':
                # Attempt to find the module that is the main module
                # This may be very hacky and potentially break
                main_module_ = sys.modules[_class.__module__]
                main_modname = ut.get_modname_from_modpath(main_module_.__file__)
                module_ = sys.modules[main_modname]
            else:
                module_ = sys.modules[_class.__module__]
            if hasattr(module_, 'rrr'):
                # Prefer the module's own reload hook when it has one.
                if reload_module:
                    module_.rrr(verbose=verbose)
            else:
                if reload_module:
                    import imp
                    if verbose:
                        print('[class] reloading ' + _class.__module__ + ' with imp')
                    try:
                        imp.reload(module_)
                    except (ImportError, AttributeError):
                        print('[class] fallback reloading ' + _class.__module__ + ' with imp')
                        # one last thing to try. probably used ut.import_module_from_fpath
                        # when importing this module
                        imp.load_source(module_.__name__, module_.__file__)
            # Reset class attributes
            _newclass = getattr(module_, _class.__name__)
            reload_class_methods(self, _newclass, verbose=verbose)
        # -- HACK --
        # TODO: handle injected definitions
        # Give the instance a chance to rebuild state after reloading.
        if hasattr(self, '_initialize_self'):
            if verbose > 1:
                print('[class] calling _initialize_self for ' + classname)
            self._initialize_self()
        elif verbose > 1:
            print('[class] ' + classname + ' does not have an _initialize_self function')
    except Exception as ex:
        ut.printex(ex, 'Error Reloading Class', keys=['modname', 'module', 'class_', 'class_list', 'self', ])
        raise
def _try_coerce_args(self, values, other):
    """Coerce values and other to dtype 'i8'. NaN and NaT convert to
    the smallest i8, and will correctly round-trip to NaT if converted
    back in _try_coerce_result. values is always ndarray-like, other
    may not be.

    Parameters
    ----------
    values : ndarray-like
    other : ndarray-like or scalar

    Returns
    -------
    base-type values, base-type other
    """
    # Reinterpret the datetime64 buffer as raw int64 nanoseconds.
    values = values.view('i8')
    if isinstance(other, bool):
        # bool is never a valid datetime operand.
        raise TypeError
    elif is_null_datetimelike(other):
        # Nulls map to the NaT sentinel.
        other = tslibs.iNaT
    elif isinstance(other, (datetime, np.datetime64, date)):
        other = self._box_func(other)
        # NOTE(review): getattr(other, 'tz') has no default, so a boxed value
        # without a 'tz' attribute would raise AttributeError here — confirm
        # _box_func always yields a Timestamp-like object with 'tz'.
        if getattr(other, 'tz') is not None:
            raise TypeError("cannot coerce a Timestamp with a tz on a "
                            "naive Block")
        other = other.asm8.view('i8')
    elif hasattr(other, 'dtype') and is_datetime64_dtype(other):
        other = other.astype('i8', copy=False).view('i8')
    else:
        # coercion issues
        # let higher levels handle
        raise TypeError(other)
    return values, other
def read_csv(csv_file, ext='.csv', format=None, delete_empty_keys=False, fieldnames=[], rowlimit=100000000, numbers=False, normalize_names=True, unique_names=True, verbosity=0):
    r"""Read a csv file from a path or file pointer, returning a dict of lists, or list of lists (according to `format`)

    filename: a directory or list of file paths
    numbers: whether to attempt to convert strings in csv to numbers

    TODO:
        merge with `nlp.util.make_dataframe` function

    Handles unquoted and quoted strings, quoted commas, quoted newlines (EOLs), complex numbers,
    times, dates, datetimes
    """
    if not csv_file:
        return
    # NOTE: fieldnames=[] is a mutable default; it is only reassigned below,
    # never mutated in place, so it is harmless here.
    if isinstance(csv_file, basestring):
        # truncate `csv_file` in case it is a string buffer containing GBs of data
        path = csv_file[:1025]
        try:
            # see http://stackoverflow.com/a/4169762/623735 before trying 'rU'
            fpin = open(path, 'rUb')  # U = universal EOL reader, b = binary
        except (IOError, FileNotFoundError):
            # Not an openable path: treat the string itself as CSV content.
            # truncate path more, in case path is used later as a file description:
            path = csv_file[:128]
            fpin = StringIO(str(csv_file))
    else:
        fpin = csv_file
        try:
            path = csv_file.name
        except (IndexError, ValueError, AttributeError, TypeError):
            path = 'unknown file buffer path'
    # format: first letter selects output layout (h/v/l/c/d/j).
    format = format or 'h'
    format = format[0].lower()
    # if fieldnames not specified then assume that first row of csv contains headings
    csvr = csv.reader(fpin, dialect=csv.excel)
    if not fieldnames:
        while not fieldnames or not any(fieldnames):
            fieldnames = strip_br([str(s).strip() for s in next(csvr)])
        if verbosity > 0:
            logger.info('Column Labels: ' + repr(fieldnames))
    if unique_names:
        norm_names = OrderedDict([(fldnm, fldnm) for fldnm in fieldnames])
    else:
        norm_names = OrderedDict([(num, fldnm) for num, fldnm in enumerate(fieldnames)])
    if normalize_names:
        norm_names = OrderedDict([(num, make_name(fldnm, **make_name.DJANGO_FIELD)) for num, fldnm in enumerate(fieldnames)])
        # required for django-formatted json files
        model_name = make_name(path, **make_name.DJANGO_MODEL)
    if format in 'c':  # columnwise dict of lists
        recs = OrderedDict((norm_name, []) for norm_name in list(norm_names.values()))
    elif format in 'vh':
        recs = [fieldnames]
    else:
        recs = []
    if verbosity > 0:
        logger.info('Field Names: ' + repr(norm_names if normalize_names else fieldnames))
    rownum = 0
    eof = False
    pbar = None
    # Remember where the data starts so progress/remaining bytes can be computed.
    start_seek_pos = fpin.tell() or 0
    if verbosity > 1:
        print('Starting at byte {} in file buffer.'.format(start_seek_pos))
    fpin.seek(0, os.SEEK_END)
    file_len = fpin.tell() - start_seek_pos  # os.fstat(fpin.fileno()).st_size
    fpin.seek(start_seek_pos)
    if verbosity > 1:
        print(('There appear to be {} bytes remaining in the file buffer.' + 'Resetting (seek) to starting position in file.').format(file_len))
    # if verbosity > 0:
    #     pbar = progressbar.ProgressBar(maxval=file_len)
    #     pbar.start()
    while csvr and rownum < rowlimit and not eof:
        if pbar:
            pbar.update(fpin.tell() - start_seek_pos)
        rownum += 1
        row = []
        row_dict = OrderedDict()
        # skip rows with all empty strings as values,
        while not row or not any(len(x) for x in row):
            try:
                row = next(csvr)
                if verbosity > 1:
                    logger.info('  row content: ' + repr(row))
            except StopIteration:
                eof = True
                break
        if eof:
            break
        if len(row) and isinstance(row[-1], basestring) and len(row[-1]):
            row = strip_br(row)
        if numbers:
            # try to convert the type to a numerical scalar type (int, float etc)
            row = [tryconvert(v, desired_types=NUMBERS_AND_DATETIMES, empty=None, default=v) for v in row]
        if row:
            # Pair values with field names, truncating to the shorter of the two.
            N = min(max(len(row), 0), len(norm_names))
            row_dict = OrderedDict(((field_name, field_value) for field_name, field_value in zip(list(list(norm_names.values()) if unique_names else norm_names)[:N], row[:N]) if (str(field_name).strip() or delete_empty_keys is False)))
            if format in 'dj':  # django json format
                recs += [{"pk": rownum, "model": model_name, "fields": row_dict}]
            elif format in 'vhl':  # list of lists of values, with header row (list of str)
                recs += [[value for field_name, value in viewitems(row_dict) if (field_name.strip() or delete_empty_keys is False)]]
            elif format in 'c':  # columnwise dict of lists
                for field_name in row_dict:
                    recs[field_name] += [row_dict[field_name]]
                if verbosity > 2:
                    print([recs[field_name][-1] for field_name in row_dict])
            else:
                recs += [row_dict]
        if verbosity > 2 and format not in 'c':
            print(recs[-1])
    if file_len > fpin.tell():
        logger.info("Only %d of %d bytes were read and processed." % (fpin.tell(), file_len))
    if pbar:
        pbar.finish()
    fpin.close()
    # Non-unique names require returning the name mapping alongside the records.
    if not unique_names:
        return recs, norm_names
    return recs
def GetInput(self):
    """Yield client urns.

    Opens all known clients in batches and yields only VFSGRRClient
    objects whose last ping is within self.max_age seconds.
    """
    client_list = GetAllClients(token=self.token)
    logging.debug("Got %d clients", len(client_list))
    for client_group in collection.Batch(client_list, self.client_chunksize):
        opened = aff4.FACTORY.MultiOpen(client_group, mode="r", aff4_type=aff4_grr.VFSGRRClient, token=self.token)
        for fd in opened:
            if not isinstance(fd, aff4_grr.VFSGRRClient):
                continue
            # Skip if older than max_age (PING is in microseconds).
            oldest_time = (time.time() - self.max_age) * 1e6
            if fd.Get(aff4_grr.VFSGRRClient.SchemaCls.PING) >= oldest_time:
                yield fd
def from_pkcs12(cls, key, email, scopes, subject=None, passphrase=PKCS12_PASSPHRASE):
    """Alternate constructor intended for using .p12 files.

    Args:
        key (dict) - Parsed JSON with service account credentials.
        email (str) - Service account email.
        scopes (Union[str, collections.Iterable[str]]) -
            List of permissions that the application requests.
        subject (str) - The email address of the user for which
            the application is requesting delegated access.
        passphrase (str) - Passphrase of private key file.
            Google generates .p12 files secured with fixed 'notasecret'
            passphrase, so if you didn't change it it's fine to omit
            this parameter.

    Returns:
        ServiceAccount
    """
    # Unwrap the PKCS#12 container and keep only the private key.
    pkcs12 = OpenSSL.crypto.load_pkcs12(key, passphrase)
    private_key = pkcs12.get_privatekey()
    return cls(key=private_key, email=email, scopes=scopes, subject=subject)
def _pidExists ( pid ) :
"""This will return True if the process associated with pid is still running on the machine .
This is based on stackoverflow question 568271.
: param int pid : ID of the process to check for
: return : True / False
: rtype : bool""" | assert pid > 0
try :
os . kill ( pid , 0 )
except OSError as err :
if err . errno == errno . ESRCH : # ESRCH = = No such process
return False
else :
raise
else :
return True |
def disk_usage(path, human=False):
    """disk usage in bytes or human readable format (e.g. '2,1GB')"""
    du_args = ['du', '-s', path]
    if human:
        du_args.append('-h')
    # The first whitespace-separated field of `du -s` output is the size.
    output = subprocess.check_output(du_args)
    return output.split()[0].decode('utf-8')
def _load_output_data_port_models ( self ) :
"""Reloads the output data port models directly from the the state""" | if not self . state_copy_initialized :
return
self . output_data_ports = [ ]
for output_data_port_m in self . state_copy . output_data_ports :
new_op_m = deepcopy ( output_data_port_m )
new_op_m . parent = self
new_op_m . data_port = output_data_port_m . data_port
self . output_data_ports . append ( new_op_m ) |
def imwrite(file, data=None, shape=None, dtype=None, **kwargs):
    """Write numpy array to TIFF file.

    Refer to the TiffWriter class and its asarray function for documentation.

    A BigTIFF file is created if the data size in bytes is larger than 4 GB
    minus 32 MB (for metadata), and 'bigtiff' is not specified, and 'imagej'
    or 'truncate' are not enabled.

    Parameters
    ----------
    file : str or binary stream
        File name or writable binary stream, such as an open file or BytesIO.
    data : array_like
        Input image. The last dimensions are assumed to be image depth,
        height, width, and samples.
        If None, an empty array of the specified shape and dtype is
        saved to file.
        Unless 'byteorder' is specified in 'kwargs', the TIFF file byte order
        is determined from the data's dtype or the dtype argument.
    shape : tuple
        If 'data' is None, shape of an empty array to save to the file.
    dtype : numpy.dtype
        If 'data' is None, data-type of an empty array to save to the file.
    kwargs : dict
        Parameters 'append', 'byteorder', 'bigtiff', and 'imagej', are passed
        to the TiffWriter constructor. Other parameters are passed to the
        TiffWriter.save function.

    Returns
    -------
    offset, bytecount : tuple or None
        If the image data are written contiguously, return offset and bytecount
        of image data in the file.
    """
    # Split constructor kwargs from save kwargs.
    tifargs = parse_kwargs(kwargs, 'append', 'bigtiff', 'byteorder', 'imagej')
    if data is None:
        # Empty array: size and byteorder come from explicit shape/dtype.
        dtype = numpy.dtype(dtype)
        size = product(shape) * dtype.itemsize
        byteorder = dtype.byteorder
    else:
        try:
            size = data.nbytes
            byteorder = data.dtype.byteorder
        except Exception:
            # data is not ndarray-like; let TiffWriter handle conversion.
            size = 0
            byteorder = None
    # Auto-enable BigTIFF above ~4 GB unless imagej/truncate modes forbid it.
    bigsize = kwargs.pop('bigsize', 2 ** 32 - 2 ** 25)
    if 'bigtiff' not in tifargs and size > bigsize and not (tifargs.get('imagej', False) or tifargs.get('truncate', False)):
        tifargs['bigtiff'] = True
    if 'byteorder' not in tifargs:
        tifargs['byteorder'] = byteorder
    with TiffWriter(file, **tifargs) as tif:
        return tif.save(data, shape, dtype, **kwargs)
def _get_all_set_properties(self):
    """Collect names of set properties.

    Returns:
        set: Set containing names of all properties, which are set to non-None value.
    """
    # A property counts as "set" when its attribute value is not None.
    return {name for name in worker_mapping() if getattr(self, name) is not None}
def optionsFromEnvironment(defaults=None):
    """Fetch root URL and credentials from the standard TASKCLUSTER_...
    environment variables and return them in a format suitable for passing to a
    client constructor.

    :param defaults: optional dict of default options; it is NOT modified
        (the original mutated the caller's dict and its nested
        ``credentials`` dict in place).
    :returns: dict with ``rootUrl`` and/or ``credentials`` filled in from
        whichever environment variables are set.
    """
    # Shallow-copy both levels so the caller's dict is never mutated.
    options = dict(defaults) if defaults else {}
    credentials = dict(options.get('credentials', {}))

    rootUrl = os.environ.get('TASKCLUSTER_ROOT_URL')
    if rootUrl:
        options['rootUrl'] = rootUrl

    # Each credential field is only set when its variable is non-empty.
    for key, var in (('clientId', 'TASKCLUSTER_CLIENT_ID'),
                     ('accessToken', 'TASKCLUSTER_ACCESS_TOKEN'),
                     ('certificate', 'TASKCLUSTER_CERTIFICATE')):
        value = os.environ.get(var)
        if value:
            credentials[key] = value

    if credentials:
        options['credentials'] = credentials
    return options
def _smcra_to_str(self, smcra, temp_dir='/tmp/'):
    """Convert a SMCRA object to a PDB-formatted string.

    WHATIF's input are PDB format files, so the structure is written to a
    temporary PDB file which is read back and then removed.

    :param smcra: Bio.PDB structure (SMCRA) object to serialize.
    :param temp_dir: directory in which the temporary file is created.
    :returns: contents of the generated PDB file as a string.
    """
    # mkstemp (instead of the deprecated, race-prone mktemp) atomically
    # creates the file; close the descriptor since PDBIO writes by path.
    fd, temp_path = tempfile.mkstemp(suffix='.pdb', dir=temp_dir)
    os.close(fd)
    try:
        io = PDBIO()
        io.set_structure(smcra)
        io.save(temp_path)
        with open(temp_path, 'r') as handle:
            return handle.read()
    finally:
        # Remove the temp file even if writing/reading failed.
        os.remove(temp_path)
def webfont_cookie(request):
    '''Adds WEBFONT Flag to the context.

    The flag is True when the request carries the webfont cookie with a
    truthy value, False otherwise.
    '''
    has_cookie = bool(
        hasattr(request, 'COOKIES')
        and request.COOKIES.get(WEBFONT_COOKIE_NAME, None)
    )
    return {WEBFONT_COOKIE_NAME.upper(): has_cookie}
def _getter ( self ) :
"""Return a function object suitable for the " get " side of the attribute
property descriptor .""" | def get_attr_value ( obj ) :
attr_str_value = obj . get ( self . _clark_name )
if attr_str_value is None :
return self . _default
return self . _simple_type . from_xml ( attr_str_value )
get_attr_value . __doc__ = self . _docstring
return get_attr_value |
def _get_non_tempy_contents(self):
    """Returns rendered Contents and non-DOMElement stuff inside this Tag."""
    for child in self.childs:
        if not issubclass(child.__class__, DOMElement):
            yield child
def pyeapi_nxos_api_args(**prev_kwargs):
    '''.. versionadded:: 2019.2.0

    Return the key-value arguments used for the authentication arguments for the
    :mod:`pyeapi execution module <salt.module.arista_pyeapi>`.

    CLI Example:

    .. code-block:: bash

        salt '*' napalm.pyeapi_nxos_api_args
    '''
    napalm_opts = salt.utils.napalm.get_device_opts(__opts__, salt_obj=__salt__)
    optional_args = napalm_opts['OPTIONAL_ARGS']
    # Authentication/connection arguments derived from the NAPALM proxy opts.
    auth_args = {
        'host': napalm_opts['HOSTNAME'],
        'username': napalm_opts['USERNAME'],
        'password': napalm_opts['PASSWORD'],
        'timeout': napalm_opts['TIMEOUT'],
        'transport': optional_args.get('transport'),
        'port': optional_args.get('port'),
        'verify': optional_args.get('verify'),
    }
    prev_kwargs.update(auth_args)
    return prev_kwargs
def get_sponsored_special_coverage_query(only_recent=False):
    """Reference to all SpecialCoverage queries.

    :param only_recent: references RECENT_SPONSORED_OFFSET_HOURS from django
        settings. Used to return sponsored content within a given
        configuration of hours.
    :returns: Djes.LazySearch query matching all active special coverages.
    """
    es_query = SearchParty(get_sponsored_special_coverages()).search()
    if not only_recent:
        return es_query
    # Restrict to content published within the configured window.
    offset_hours = getattr(settings, "RECENT_SPONSORED_OFFSET_HOURS", 0)
    cutoff = timezone.now() - timezone.timedelta(hours=offset_hours)
    return es_query.filter(Published(after=cutoff))
def telnet_login(
    self,
    pri_prompt_terminator=r"#\s*$",
    alt_prompt_terminator=r">\s*$",
    username_pattern=r"(?:user:|username|login|user name)",
    pwd_pattern=r"assword",
    delay_factor=1,
    max_loops=20,
):
    """Telnet login. Can be username/password or just password.

    :param pri_prompt_terminator: Primary trailing delimiter for identifying a device prompt
    :type pri_prompt_terminator: str
    :param alt_prompt_terminator: Alternate trailing delimiter for identifying a device prompt
    :type alt_prompt_terminator: str
    :param username_pattern: Pattern used to identify the username prompt
    :type username_pattern: str
    :param delay_factor: See __init__: global_delay_factor
    :type delay_factor: int
    :param max_loops: Controls the wait time in conjunction with the delay_factor
        (default: 20)
    """
    def found_prompt(data):
        # Either prompt terminator indicates we are logged in.
        return bool(
            re.search(pri_prompt_terminator, data, flags=re.M)
            or re.search(alt_prompt_terminator, data, flags=re.M)
        )

    delay_factor = self.select_delay_factor(delay_factor)
    time.sleep(1 * delay_factor)
    output = ""
    return_msg = ""
    i = 1
    while i <= max_loops:
        try:
            output = self.read_channel()
            return_msg += output
            # Search for username pattern / send username
            if re.search(username_pattern, output, flags=re.I):
                self.write_channel(self.username + self.TELNET_RETURN)
                time.sleep(1 * delay_factor)
                output = self.read_channel()
                return_msg += output
            # Search for password pattern / send password
            if re.search(pwd_pattern, output, flags=re.I):
                self.write_channel(self.password + self.TELNET_RETURN)
                time.sleep(0.5 * delay_factor)
                output = self.read_channel()
                return_msg += output
            # Check if proper data received.  A single test suffices: the
            # original performed this identical check twice back-to-back on
            # the same unchanged `output`.
            if found_prompt(output):
                return return_msg
            self.write_channel(self.TELNET_RETURN)
            time.sleep(0.5 * delay_factor)
            i += 1
        except EOFError:
            self.remote_conn.close()
            msg = "Login failed: {}".format(self.host)
            raise NetMikoAuthenticationException(msg)
    # Last try to see if we already logged in
    self.write_channel(self.TELNET_RETURN)
    time.sleep(0.5 * delay_factor)
    output = self.read_channel()
    return_msg += output
    if found_prompt(output):
        return return_msg
    msg = "Login failed: {}".format(self.host)
    self.remote_conn.close()
    raise NetMikoAuthenticationException(msg)
def __is_surrogate_escaped ( self , text ) :
"""Checks if surrogate is escaped""" | try :
text . encode ( 'utf-8' )
except UnicodeEncodeError as e :
if e . reason == 'surrogates not allowed' :
return True
return False |
def unassigned(data, as_json=False):
    """https://sendgrid.com/docs/API_Reference/api_v3.html#ip-addresses

    The /ips rest endpoint returns information about the IP addresses
    and the usernames assigned to an IP.

    unassigned returns a listing of the IP addresses that are allocated
    but have 0 users assigned.

    data (response.body from sg.client.ips.get())
    as_json False -> get list of dicts
            True -> get json object

    example:
        sg = sendgrid.SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
        params = {'subuser': 'test_string', 'ip': 'test_string',
                  'limit': 1, 'exclude_whitelabels': 'true', 'offset': 1}
        response = sg.client.ips.get(query_params=params)
        if response.status_code == 201:
            data = response.body
            unused = unassigned(data)
    """
    unused_ips = set()
    if isinstance(data, list):
        # Collect every IP whose subusers list is empty.
        unused_ips = {entry["ip"] for entry in data
                      if len(entry["subusers"]) == 0}
    return format_ret(unused_ips, as_json=as_json)
def fit_circular_gaussian(samples, high=np.pi, low=0):
    """Compute the circular mean for samples in a range.

    Args:
        samples (ndarray): a one or two dimensional array. If one dimensional we calculate the fit using all
            values. If two dimensional, we fit the Gaussian for every set of samples over the first dimension.
        high (float): The maximum wrap point
        low (float): The minimum wrap point

    Returns:
        tuple: (mean, std) scalars for 1D input, or per-row arrays for 2D
        input, mapped back into the [low, high) range.
    """
    # OpenCL kernel: one work-item per row.  Each row's samples are rescaled
    # from [low, high) onto the circle, the mean cos/sin components are
    # accumulated incrementally, and the circular mean/std are derived from
    # the resultant vector length R (clamped to <= 1 for numerical safety).
    # NOTE(review): the kernel's local `double stds = ...` shadows the output
    # parameter and is unused — the final write below recomputes it.
    cl_func = SimpleCLFunction.from_string('''
        void compute(global mot_float_type* samples,
                     global mot_float_type* means,
                     global mot_float_type* stds,
                     int nmr_samples,
                     int low,
                     int high){
            double cos_mean = 0;
            double sin_mean = 0;
            double ang;
            for(uint i = 0; i < nmr_samples; i++){
                ang = (samples[i] - low)*2*M_PI / (high - low);
                cos_mean += (cos(ang) - cos_mean) / (i + 1);
                sin_mean += (sin(ang) - sin_mean) / (i + 1);
            }
            double R = hypot(cos_mean, sin_mean);
            if(R > 1){
                R = 1;
            }
            double stds = 1/2. * sqrt(-2 * log(R));
            double res = atan2(sin_mean, cos_mean);
            if(res < 0){
                res += 2 * M_PI;
            }
            *(means) = res*(high - low)/2.0/M_PI + low;
            *(stds) = ((high - low)/2.0/M_PI) * sqrt(-2*log(R));
        }
    ''')

    def run_cl(samples):
        # Map inputs/outputs into CL buffers; evaluate one work-item per row.
        data = {'samples': Array(samples, 'mot_float_type'),
                'means': Zeros(samples.shape[0], 'mot_float_type'),
                'stds': Zeros(samples.shape[0], 'mot_float_type'),
                'nmr_samples': Scalar(samples.shape[1]),
                'low': Scalar(low),
                'high': Scalar(high), }
        cl_func.evaluate(data, samples.shape[0])
        return data['means'].get_data(), data['stds'].get_data()

    if len(samples.shape) == 1:
        # Single set of samples: promote to one row, then unwrap the results.
        mean, std = run_cl(samples[None, :])
        return mean[0], std[0]
    return run_cl(samples)
def _copy ( source , destination , ignore = None ) :
"""Effective copy""" | if os . path . isdir ( source ) :
shutil . copytree ( source , destination , symlinks = True , ignore = ignore )
else :
shutil . copy ( source , destination )
shutil . copystat ( source , destination ) |
def _call(self, command, ignore_errors=None):
    """Call remote command with logging.

    :param command: command to run (first transformed by ``self._get_command``).
    :param ignore_errors: optional iterable of non-zero return codes to
        treat as success.
    :raises subprocess.CalledProcessError: when the command exits non-zero
        with a code not listed in ``ignore_errors``.
    """
    if ignore_errors is None:
        ignore_errors = []
    command = self._get_command(command)
    logger.debug("Cmd %s" % command)
    # subprocess.DEVNULL replaces the hand-opened '/dev/null' handle, which
    # was not cross-platform and leaked if subprocess.call raised.
    retcode = subprocess.call(command, stdout=subprocess.DEVNULL,
                              stderr=subprocess.DEVNULL)
    if retcode in ignore_errors:
        logger.debug("<-- Cmd %s returned %d (ignored)" % (command, retcode))
        return
    if retcode:
        logger.error("<-- Cmd %s returned: %d (error)" % (command, retcode))
        raise subprocess.CalledProcessError(retcode, command)
    logger.debug("<-- Returned %d (good)" % retcode)
    return
def is_trusted_subject(request):
    """Determine if calling subject is fully trusted.

    True when any of the request's active subjects appears in the
    configured set of trusted subjects.
    """
    active = request.all_subjects_set
    logging.debug('Active subjects: {}'.format(', '.join(active)))
    logging.debug('Trusted subjects: {}'.format(', '.join(get_trusted_subjects())))
    # Any overlap between active and trusted subjects means trusted.
    return not active.isdisjoint(get_trusted_subjects())
def archive ( self ) :
"""Archives the resource .""" | self . _client . _put ( "{0}/archived" . format ( self . __class__ . base_url ( self . sys [ 'space' ] . id , self . sys [ 'id' ] , environment_id = self . _environment_id ) , ) , { } , headers = self . _update_headers ( ) )
return self . reload ( ) |
def fetch(self, link, into=None):
    """Fetch the binary content associated with the link and write to a file.

    :param link: The :class:`Link` to fetch.
    :keyword into: If specified, write into the directory ``into``. If ``None``, creates a new
        temporary directory that persists for the duration of the interpreter.
    """
    dest_dir = into or safe_mkdtemp()
    target = os.path.join(dest_dir, link.filename)
    # Assume that if the local file already exists, it is safe to use.
    if os.path.exists(target):
        return target
    with TRACER.timed('Fetching %s' % link.url, V=2):
        # Download under a unique temporary name, then rename into place so
        # a partially-written file never appears at `target`.
        target_tmp = '%s.%s' % (target, uuid.uuid4())
        with contextlib.closing(self.open(link)) as in_fp:
            with safe_open(target_tmp, 'wb') as out_fp:
                shutil.copyfileobj(in_fp, out_fp)
        os.rename(target_tmp, target)
    return target
def get_last_response_xml(self, pretty_print_if_possible=False):
    """Retrieves the raw XML (decrypted) of the last SAML response,
    or the last Logout Response generated or processed.

    :returns: SAML response XML
    :rtype: string|None
    """
    last = self.__last_response
    if last is None:
        return None
    # Already a string: return as-is; otherwise serialize the XML tree.
    if isinstance(last, basestring):
        return last
    return tostring(last, pretty_print=pretty_print_if_possible)
def output_forecasts_csv(self, forecasts, mode, csv_path, run_date_format="%Y%m%d-%H%M"):
    """Output hail forecast values to csv files by run date and ensemble member.

    Args:
        forecasts: dict with "condition" and "dist" DataFrames, joined on
            Step_ID/Track_ID/Ensemble_Member/Forecast_Hour.
        mode: key into ``self.data`` selecting the run set ("combo" table).
        csv_path: directory where one csv per (member, run date) is written.
        run_date_format: strftime format used in output file names.

    Returns:
        None; files are written as a side effect.
    """
    merged_forecasts = pd.merge(forecasts["condition"], forecasts["dist"],
                                on=["Step_ID", "Track_ID", "Ensemble_Member", "Forecast_Hour"])
    all_members = self.data[mode]["combo"]["Ensemble_Member"]
    members = np.unique(all_members)
    all_run_dates = pd.DatetimeIndex(self.data[mode]["combo"]["Run_Date"])
    run_dates = pd.DatetimeIndex(np.unique(all_run_dates))
    # (stray debug print of run_dates removed; it polluted stdout)
    for member in members:
        for run_date in run_dates:
            # Rows belonging to this member / run-date combination.
            mem_run_index = (all_run_dates == run_date) & (all_members == member)
            member_forecast = merged_forecasts.loc[mem_run_index]
            member_forecast.to_csv(
                join(csv_path,
                     "hail_forecasts_{0}_{1}_{2}.csv".format(
                         self.ensemble_name, member,
                         run_date.strftime(run_date_format))))
    return
def _setup_time(self, basemap):
    """generates CartoCSS for time-based maps (torque)

    Validates the configured time column, builds the torque query and
    aggregation function from the color column's type, and stores the
    resulting CartoCSS on ``self.torque_cartocss`` / ``self.cartocss``.
    """
    # validate time column information: torque only works on point layers
    if self.geom_type != 'point':
        raise ValueError('Cannot do time-based maps with data in '
                         '`{query}` since this table does not contain '
                         'point geometries'.format(query=self.orig_query))
    elif self.style_cols[self.time['column']] not in ('number', 'date', ):
        raise ValueError('Cannot create an animated map from column '
                         '`{col}` because it is of type {t1}. It must '
                         'be of type number or date.'.format(
                             col=self.time['column'],
                             t1=self.style_cols[self.time['column']]))
    # don't use turbo-carto for animated maps
    column = self.time['column']
    frames = self.time['frames']
    method = self.time['method']
    duration = self.time['duration']
    if (self.color in self.style_cols
            and self.style_cols[self.color] in ('string', 'boolean', )):
        # Categorical color column: rank categories by frequency and join
        # the rank back onto every row as cf_value_<col> for torque binning.
        self.query = minify_sql(['SELECT',
                                 ' orig.*, __wrap.cf_value_{col}',
                                 'FROM ({query}) AS orig, (',
                                 ' SELECT',
                                 ' row_number() OVER (',
                                 ' ORDER BY val_{col}_cnt DESC) AS cf_value_{col},',
                                 ' {col}',
                                 ' FROM (',
                                 ' SELECT {col}, count({col}) AS val_{col}_cnt',
                                 ' FROM ({query}) as orig',
                                 ' GROUP BY {col}',
                                 ' ORDER BY 2 DESC',
                                 ' ) AS _wrap',
                                 ') AS __wrap',
                                 'WHERE __wrap.{col} = orig.{col}',
                                 ]).format(col=self.color, query=self.orig_query)
        agg_func = '\'CDB_Math_Mode(cf_value_{})\''.format(self.color)
        # Ten fixed bins, one per frequency rank.
        self.scheme = {'bins': [str(i) for i in range(1, 11)],
                       'name': (self.scheme.get('name') if self.scheme else 'Bold'),
                       'bin_method': '', }
    elif (self.color in self.style_cols
          and self.style_cols[self.color] in ('number', )):
        # Numeric color column: expose it as `value` and aggregate by mean.
        self.query = ' '.join(['SELECT *, {col} as value',
                               'FROM ({query}) as _wrap']).format(
                                   col=self.color, query=self.orig_query)
        agg_func = '\'avg({})\''.format(self.color)
    else:
        # No usable color column: aggregate feature ids with the configured
        # torque method instead.
        agg_func = "'{method}(cartodb_id)'".format(method=method)
    self.torque_cartocss = cssify({
        'Map': {
            '-torque-frame-count': frames,
            '-torque-animation-duration': duration,
            '-torque-time-attribute': "'{}'".format(column),
            '-torque-aggregation-function': agg_func,
            '-torque-resolution': 1,
            '-torque-data-aggregation': ('cumulative'
                                         if self.time['cumulative']
                                         else 'linear'),
        },
    })
    self.cartocss = (self.torque_cartocss
                     + self._get_cartocss(basemap, has_time=True))
def _bool_method_SERIES(cls, op, special):
    """Wrapper function for Series arithmetic operations, to avoid
    code duplication.

    Builds and returns the logical-operator method (e.g. ``__and__``,
    ``__or__``, ``__xor__``) to be attached to ``cls``.
    """
    op_name = _get_op_name(op, special)

    def na_op(x, y):
        # Fast path: apply the raw op; on TypeError fall back to object-dtype
        # vectorized or scalar binops for mixed dtypes.
        try:
            result = op(x, y)
        except TypeError:
            assert not isinstance(y, (list, ABCSeries, ABCIndexClass))
            if isinstance(y, np.ndarray):
                # bool-bool dtype operations should be OK, should not get here
                assert not (is_bool_dtype(x) and is_bool_dtype(y))
                x = ensure_object(x)
                y = ensure_object(y)
                result = libops.vec_binop(x, y, op)
            else:
                # let null fall thru
                assert lib.is_scalar(y)
                if not isna(y):
                    y = bool(y)
                try:
                    result = libops.scalar_binop(x, y, op)
                except (TypeError, ValueError, AttributeError,
                        OverflowError, NotImplementedError):
                    raise TypeError("cannot compare a dtyped [{dtype}] array "
                                    "with a scalar of type [{typ}]"
                                    .format(dtype=x.dtype,
                                            typ=type(y).__name__))
        return result

    # NaN-filling strategies: integer operands keep integer semantics
    # (fill with 0); anything else is coerced to bool (fill with False).
    fill_int = lambda x: x.fillna(0)
    fill_bool = lambda x: x.fillna(False).astype(bool)

    def wrapper(self, other):
        is_self_int_dtype = is_integer_dtype(self.dtype)
        self, other = _align_method_SERIES(self, other, align_asobject=True)
        res_name = get_op_result_name(self, other)
        if isinstance(other, ABCDataFrame):
            # Defer to DataFrame implementation; fail early
            return NotImplemented
        elif isinstance(other, (ABCSeries, ABCIndexClass)):
            is_other_int_dtype = is_integer_dtype(other.dtype)
            other = fill_int(other) if is_other_int_dtype else fill_bool(other)
            ovalues = other.values
            finalizer = lambda x: x
        else:
            # scalars, list, tuple, np.array
            is_other_int_dtype = is_integer_dtype(np.asarray(other))
            if is_list_like(other) and not isinstance(other, np.ndarray):
                # TODO: Can we do this before the is_integer_dtype check?
                # could the is_integer_dtype check be checking the wrong
                # thing?  e.g. other = [[0, 1], [2, 3], [4, 5]]?
                other = construct_1d_object_array_from_listlike(other)
            ovalues = other
            finalizer = lambda x: x.__finalize__(self)

        # For int vs int `^`, `|`, `&` are bitwise operators and return
        # integer dtypes.  Otherwise these are boolean ops
        filler = (fill_int if is_self_int_dtype and is_other_int_dtype
                  else fill_bool)
        res_values = na_op(self.values, ovalues)
        unfilled = self._constructor(res_values,
                                     index=self.index, name=res_name)
        filled = filler(unfilled)
        return finalizer(filled)

    wrapper.__name__ = op_name
    return wrapper
def complete_filename(text):
    '''complete a filename

    Returns the glob matches for ``text*``; directories get a trailing
    path separator appended so completion can descend into them.
    '''
    # `matches` instead of `list` — the original shadowed the builtin.
    matches = glob.glob(text + '*')
    for idx, match in enumerate(matches):
        if os.path.isdir(match):
            # ensure directories have trailing slashes
            matches[idx] = match + os.path.sep
    return matches
def get_long():
    """Generates a random long. The length of said long varies by platform.

    Fills a C ``unsigned long`` with random bytes via the Windows CryptoAPI
    (Advapi32.CryptGenRandom). Windows-only.

    NOTE(review): the return codes of CryptAcquireContextA/CryptGenRandom
    are assigned to ``ok`` but never checked — on failure the function may
    return the zero-initialized default. Confirm whether errors should raise.
    """
    # The C long type to populate.
    pbRandomData = c_ulong()
    # Determine the byte size of this machine's long type.
    size_of_long = wintypes.DWORD(sizeof(pbRandomData))
    # Used to keep track of status. 1 = success, 0 = error.
    ok = c_int()
    # Provider handle for the default RSA cryptographic service provider.
    hProv = c_ulong()
    ok = windll.Advapi32.CryptAcquireContextA(byref(hProv), None, None, PROV_RSA_FULL, 0)
    # Fill pbRandomData with size_of_long random bytes.
    ok = windll.Advapi32.CryptGenRandom(hProv, size_of_long, byref(pbRandomData))
    return pbRandomData.value
def remove_params(self, *args):
    """Remove [possibly many] parameters from the Assembly.

    E.g.,
        remove_params('color', 'visibility')

    Names not currently present are ignored (the original raised KeyError
    for unknown names, contradicting the "remove if present" contract).
    """
    for name in args:
        # Default of None makes removal tolerant of absent keys.
        self._orig_kwargs.pop(name, None)
    self.kwargs = self._orig_kwargs.copy()
def log_before(attr, level=logging.DEBUG):
    '''Decorator to log attribute's value(s) before function call.

    Implementation allows usage only for methods belonging to class. The class
    instance needs to have a :attr:`logger` that is derived from
    :py:class:`~creamas.logging.ObjectLogger`.

    :param int level: logging level
    :param str attr: name of the class instance's parameter to be logged
    '''
    def deco(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            # args[0] is the bound instance; log only if it carries a logger.
            instance_logger = getattr(args[0], 'logger', None)
            if instance_logger is not None:
                instance_logger.log_attr(level, attr)
            return func(*args, **kwargs)
        return wrapper
    return deco
def set_current_canvas(canvas):
    """Make a canvas active. Used primarily by the canvas itself."""
    # Notify glir
    canvas.context._do_CURRENT_command = True
    # Fast path: this canvas is already the most recent one.
    if canvasses and canvasses[-1]() is canvas:
        return
    # Rebuild the live-canvas list with this canvas moved to the end.
    live = [ref() for ref in canvasses if ref() is not None]
    while canvas in live:
        live.remove(canvas)
    live.append(canvas)
    canvasses[:] = [weakref.ref(c) for c in live]
def main(pargs):
    """This should only be used for testing. The primary mode of operation is
    as an imported library.

    :param pargs: command-line arguments. NOTE(review): currently unused —
        the input file is read from ``sys.argv[1]`` directly; confirm
        whether ``pargs`` should be consulted instead.
    """
    input_file = sys.argv[1]
    fp = ParseFileLineByLine(input_file)
    # Print every line/record yielded by the parser.
    for i in fp:
        print(i)
def memoize(Class, *args, **kwargs):
    '''Memoize/record a function inside this vlermv. ::

        @Vlermv.cache('~/.http')
        def get(url):
            return requests.get(url, auth=('username', 'password'))

    The args and kwargs get passed to the Vlermv with some slight changes.
    Here are the changes.

    First, the default ``key_transformer`` is the tuple key_transformer
    rather than the simple key_transformer.

    Second, it is valid for cache to be called without arguments.
    Vlermv would ordinarily fail if no arguments were passed to it.
    If you pass no arguments to cache, the Vlermv directory argument
    (the one required argument) will be set to the name of the function.

    Third, you are more likely to use the ``cache_exceptions`` keyword
    argument; see :py:class:`~vlermv.Vlermv` for documentation on that.
    '''
    def decorator(func):
        if args:
            vlermv_args = args
        elif hasattr(func, '__name__'):
            # No explicit location: fall back to the function's own name.
            vlermv_args = (func.__name__, )
        else:
            raise ValueError('You must specify the location to store the vlermv.')
        vlermv = Class(*vlermv_args, **kwargs)
        vlermv.func = func
        return vlermv
    return decorator
def assign_proficiency_to_objective_bank(self, proficiency_id, objective_bank_id):
    """Adds an existing ``Proficiency`` to a ``ObjectiveBank``.

    arg:    proficiency_id (osid.id.Id): the ``Id`` of the
            ``Proficiency``
    arg:    objective_bank_id (osid.id.Id): the ``Id`` of the
            ``ObjectiveBank``
    raise:  AlreadyExists - ``proficiency_id`` is already mapped to
            ``objective_bank_id``
    raise:  NotFound - ``proficiency_id`` or ``objective_bank_id``
            not found
    raise:  NullArgument - ``proficiency_id`` or
            ``objective_bank_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Implemented from template for
    # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
    mgr = self._get_provider_manager('LEARNING', local=True)
    lookup_session = mgr.get_objective_bank_lookup_session(proxy=self._proxy)
    # Looking the bank up first validates objective_bank_id: this call
    # raises NotFound when the bank does not exist; the result is unused.
    lookup_session.get_objective_bank(objective_bank_id)
    # to raise NotFound
    self._assign_object_to_catalog(proficiency_id, objective_bank_id)
def is_in_path(program):
    '''Check if a program is in the system ``PATH``.

    Checks if a given program is in the user's ``PATH`` or not.

    Args:
        program (str): The program to try to find in ``PATH``.

    Returns:
        bool: Is the program in ``PATH``?  Always a bool (the original
        implicitly returned ``None`` when the program was not found).
    '''
    if sys.version_info.major == 2:
        # Python 2 has no os.get_exec_path(); split PATH manually.
        # os.pathsep already encodes the ';' (Windows) vs ':' (POSIX)
        # distinction the original hard-coded, and the `or ''` guards
        # against PATH being unset (os.getenv returning None).
        path = (os.getenv('PATH') or '').split(os.pathsep)
    else:
        path = os.get_exec_path()
    for directory in path:
        if os.path.isdir(directory) and program in os.listdir(directory):
            return True
    return False
def cmd_dns_lookup_reverse(ip_address, verbose):
    """Perform a reverse lookup of a given IP address.

    Example:

        $ habu.dns.lookup.reverse 8.8.8.8
        "hostname": "google-public-dns-a.google.com"
    """
    if verbose:
        logging.basicConfig(level=logging.INFO, format='%(message)s')
        print("Looking up %s..." % ip_address, file=sys.stderr)
    answer = lookup_reverse(ip_address)
    if not answer:
        # Empty answer means the input was not a parseable address.
        print("[X] %s is not valid IPv4/IPV6 address" % ip_address)
    else:
        print(json.dumps(answer, indent=4))
    return True
def _linux_cpudata():
    '''Return some CPU information for Linux minions'''
    # Provides:
    #   num_cpus
    #   cpu_model
    #   cpu_flags
    grains = {}
    cpuinfo = '/proc/cpuinfo'
    # Parse over the cpuinfo file
    if os.path.isfile(cpuinfo):
        with salt.utils.files.fopen(cpuinfo, 'r') as _fp:
            grains['num_cpus'] = 0
            for line in _fp:
                comps = line.split(':')
                if len(comps) < 2:
                    continue
                key = comps[0].strip()
                val = comps[1].strip()
                if key == 'processor':
                    grains['num_cpus'] += 1
                elif key == 'model name':
                    grains['cpu_model'] = val
                elif key in ('flags', 'Features'):
                    # 'flags' on x86, 'Features' on ARM.
                    grains['cpu_flags'] = val.split()
                elif key == 'Processor':
                    # ARM-style /proc/cpuinfo, e.g.:
                    #   Processor : ARMv6-compatible processor rev 7 (v6l)
                    #   Features  : swp half thumb fastmult vfp edsp java tls
                    #   Hardware  : BCM2708
                    grains['cpu_model'] = val.split('-')[0]
                    grains['num_cpus'] = 1
    # Sensible fallbacks when /proc/cpuinfo is missing or incomplete.
    grains.setdefault('num_cpus', 0)
    grains.setdefault('cpu_model', 'Unknown')
    grains.setdefault('cpu_flags', [])
    return grains
def save_data(self, trigger_id, **data):
    """let's save the data

    :param trigger_id: trigger ID from which to save data
    :param data: the data to check to be used and save
    :type trigger_id: int
    :type data: dict
    :return: the status of the save statement
    :rtype: boolean
    """
    title, content = super(ServiceMastodon, self).save_data(trigger_id, **data)
    # check if we have a 'good' title
    if self.title_or_content(title):
        content = str("{title} {link}").format(title=title, link=data.get('link'))
        content += get_tags(Mastodon, trigger_id)
    # if not then use the content
    else:
        content += " " + data.get('link') + " " + get_tags(Mastodon, trigger_id)
    content = self.set_mastodon_content(content)
    us = UserService.objects.get(user=self.user, token=self.token,
                                 name='ServiceMastodon')
    try:
        toot_api = MastodonAPI(client_id=us.client_id,
                               client_secret=us.client_secret,
                               access_token=self.token,
                               api_base_url=us.host)
    except ValueError as e:
        logger.error(e)
        status = False
        update_result(trigger_id, msg=e, status=status)
        # BUGFIX: return here — the original fell through and raised
        # NameError on the undefined `toot_api` below.
        return status
    media_ids = None
    try:
        if settings.DJANGO_TH['sharing_media']:
            # do we have a media in the content?
            content, media = self.media_in_content(content)
            if media:
                # upload the media first
                media_ids = toot_api.media_post(media_file=media)
                media_ids = [media_ids]
        toot_api.status_post(content, media_ids=media_ids)
        status = True
    except Exception as inst:
        logger.critical("Mastodon ERR {}".format(inst))
        status = False
        update_result(trigger_id, msg=inst, status=status)
    return status
def restore_default_configuration():
    """Restores the sys.stdout and the sys.stderr buffer streams to their default
    values without regard to what step has currently overridden their values.
    This is useful during cleanup outside of the running execution block."""
    def _reset_stream(stream, fallback):
        # Already the default: nothing to do.
        if stream == fallback:
            return fallback
        # A foreign (non-RedirectBuffer) stream is left untouched.
        if not isinstance(stream, RedirectBuffer):
            return stream
        try:
            stream.active = False
            stream.close()
        except Exception:
            # Best-effort cleanup; a failed close must not abort teardown.
            pass
        return fallback

    sys.stdout = _reset_stream(sys.stdout, sys.__stdout__)
    sys.stderr = _reset_stream(sys.stderr, sys.__stderr__)
def get(self, path, payload=None, headers=None):
    """HTTP GET operation.

    :param path: URI Path
    :param payload: HTTP Body
    :param headers: HTTP Headers
    :raises ApiError: Raises if the remote server encountered an error.
    :raises ApiConnectionError: Raises if there was a connectivity issue.
    :return: Response
    """
    # Thin wrapper: all transport details live in ``_request``.
    return self._request('get', path, payload, headers)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.