signature
stringlengths 29
44.1k
| implementation
stringlengths 0
85.2k
|
|---|---|
def get_authentication_tokens(self):
    """Return the OAuth request tokens plus an authorization URL.

    Performs the request-token step against ``self.request_token_url``
    and builds an ``auth_url`` the user should visit to authorize.

    :return: dict of request-token fields with an added ``auth_url`` key.
    :raises AuthError: if the request-token call does not return HTTP 200.
    """
    callback_url = self.callback_url or 'oob'

    request_args = {}
    if OAUTH_LIB_SUPPORTS_CALLBACK:
        request_args['callback_url'] = callback_url

    resp, content = self.client.request(self.request_token_url, "GET", **request_args)
    if resp['status'] != '200':
        # Fixed message: original read "withyour" (missing space between words).
        raise AuthError("Seems something couldn't be verified "
                        "with your OAuth junk. Error: %s, Message: %s"
                        % (resp['status'], content))

    request_tokens = dict(urlparse.parse_qsl(content))

    oauth_callback_confirmed = request_tokens.get('oauth_callback_confirmed') == 'true'

    if not OAUTH_LIB_SUPPORTS_CALLBACK and callback_url != 'oob' and oauth_callback_confirmed:
        import warnings
        warnings.warn("oauth2 library doesn't support OAuth 1.0a "
                      "type callback, but remote requires it")
        oauth_callback_confirmed = False

    auth_url_params = {'oauth_token': request_tokens['oauth_token']}

    # Use old-style callback argument when the library cannot send it itself.
    if callback_url != 'oob' and not oauth_callback_confirmed:
        auth_url_params['oauth_callback'] = callback_url

    request_tokens['auth_url'] = self.authenticate_url + '?' + urllib.urlencode(auth_url_params)
    return request_tokens
|
def endBy1(p, sep):
    """``endBy1(p, sep)`` parses one or more occurrences of ``p``, each
    separated and terminated by ``sep``; returns the list of values
    produced by ``p``."""
    unbounded = float('inf')
    return separated(p, sep, 1, maxt=unbounded, end=True)
|
def _raw_name_string ( bufr , strings_offset , str_offset , length ) :
"""Return the * length * bytes comprising the encoded string in * bufr * at
* str _ offset * in the strings area beginning at * strings _ offset * ."""
|
offset = strings_offset + str_offset
tmpl = '%ds' % length
return unpack_from ( tmpl , bufr , offset ) [ 0 ]
|
def size(self, table=None):
    """Return the size, in bytes, of the profile or *table*.

    With *table* set to ``None``, sums the sizes of every table listed in
    ``self.relations``; otherwise stats just that table's file.  A table
    whose file cannot be resolved (ItsdbError) contributes nothing.
    Note: if the file is gzipped, the compressed size is reported.
    """
    if table is not None:
        try:
            path = _table_filename(os.path.join(self.root, table))
            return os.stat(path).st_size
        except ItsdbError:
            return 0
    total = 0
    for name in self.relations:
        total += self.size(name)
    return total
|
def id(self):
    """Compute the record's signature: a SHA-512 over its significant values.

    :return: SHA-512 hex digest string
    """
    digest = hashlib.new('sha512')
    significant = (
        self.machine.name,
        self.machine.os,
        self.user,
        self.application.name,
        self.application.path,
        self.event.report_type,
        self.event.type,
        self.event.time.isoformat(),
    )
    for item in significant:
        digest.update(str(item).encode('utf-8'))
    # Parameters are folded in sorted by their id for a stable signature.
    for param in sorted(self.parameters, key=lambda p: getattr(p, 'id')):
        digest.update(param.value.encode('utf-8'))
    return digest.hexdigest()
|
def create_intent(self, workspace_id, intent, description=None, examples=None, **kwargs):
    """Create a new intent in a workspace.

    This operation is limited to 2000 requests per 30 minutes; see
    **Rate limiting** in the service documentation.

    :param str workspace_id: Unique identifier of the workspace.
    :param str intent: The name of the intent.  May contain only Unicode
        alphanumeric, underscore, hyphen, and dot characters; must not
        begin with the reserved prefix ``sys-``; at most 128 characters.
    :param str description: The description of the intent.  May not contain
        carriage return, newline, or tab characters; at most 128 characters.
    :param list[Example] examples: An array of user input examples.
    :param dict headers: A ``dict`` containing the request headers.
    :return: A ``DetailedResponse`` containing the result, headers and
        HTTP status code.
    :rtype: DetailedResponse
    """
    if workspace_id is None:
        raise ValueError('workspace_id must be provided')
    if intent is None:
        raise ValueError('intent must be provided')

    if examples is not None:
        examples = [self._convert_model(example, Example) for example in examples]

    headers = {}
    if 'headers' in kwargs:
        headers.update(kwargs.get('headers'))
    headers.update(get_sdk_headers('conversation', 'V1', 'create_intent'))

    params = {'version': self.version}
    data = {
        'intent': intent,
        'description': description,
        'examples': examples,
    }
    url = '/v1/workspaces/{0}/intents'.format(*self._encode_path_vars(workspace_id))

    return self.request(
        method='POST',
        url=url,
        headers=headers,
        params=params,
        json=data,
        accept_json=True,
    )
|
def _type_insert ( self , handle , key , value ) :
'''Insert the value into the series .'''
|
if value != 0 :
if isinstance ( value , float ) :
handle . incrbyfloat ( key , value )
else :
handle . incr ( key , value )
|
def ask_list(question: str, default: list = None) -> list:
    """Ask the user for a comma-separated list of strings."""
    if default is not None:
        default_q = " [default: {0}]: ".format(",".join(default))
    else:
        default_q = ""
    answer = input("{0} [{1}]: ".format(question, default_q))
    if answer == "":
        return default
    return [part.strip() for part in answer.split(",")]
|
def _file_size ( file_path , uncompressed = False ) :
"""Return size of a single file , compressed or uncompressed"""
|
_ , ext = os . path . splitext ( file_path )
if uncompressed :
if ext in { ".gz" , ".gzip" } :
with gzip . GzipFile ( file_path , mode = "rb" ) as fp :
try :
fp . seek ( 0 , os . SEEK_END )
return fp . tell ( )
except ValueError : # on python2 , cannot seek from end and must instead read to end
fp . seek ( 0 )
while len ( fp . read ( 8192 ) ) != 0 :
pass
return fp . tell ( )
elif ext in { ".bz" , ".bz2" , ".bzip" , ".bzip2" } :
with bz2 . BZ2File ( file_path , mode = "rb" ) as fp :
fp . seek ( 0 , os . SEEK_END )
return fp . tell ( )
return os . path . getsize ( file_path )
|
def _lstree ( files , dirs ) :
"""Make git ls - tree like output ."""
|
for f , sha1 in files :
yield "100644 blob {}\t{}\0" . format ( sha1 , f )
for d , sha1 in dirs :
yield "040000 tree {}\t{}\0" . format ( sha1 , d )
|
def to_team(team):
    """Serialize a team object to its id string.

    :param team: object to serialize
    :return: string id
    """
    from sevenbridges.models.team import Team
    if not team:
        raise SbgError('Team is required!')
    if isinstance(team, Team):
        return team.id
    if isinstance(team, six.string_types):
        return team
    raise SbgError('Invalid team parameter!')
|
def wb_db004(self, value=None):
    """Corresponds to IDD Field `wb_db004`.

    Mean coincident wet-bulb temperature for the dry-bulb temperature at
    0.4% annual cumulative frequency of occurrence (warm conditions).

    Args:
        value (float): value for IDD Field `wb_db004` (unit: C).
            If `value` is None it will not be checked against the
            specification and is assumed to be a missing value.

    Raises:
        ValueError: if `value` is not a valid value
    """
    if value is not None:
        try:
            value = float(value)
        except ValueError:
            raise ValueError('value {} need to be of type float '
                             'for field `wb_db004`'.format(value))
    self._wb_db004 = value
|
def _cx_counters_psutil(self, tags=None):
    """Collect per-interface counter metrics using psutil."""
    if tags is None:
        tags = []
    for iface, counters in iteritems(psutil.net_io_counters(pernic=True)):
        metrics = {
            'bytes_rcvd': counters.bytes_recv,
            'bytes_sent': counters.bytes_sent,
            'packets_in.count': counters.packets_recv,
            'packets_in.error': counters.errin,
            'packets_out.count': counters.packets_sent,
            'packets_out.error': counters.errout,
        }
        self._submit_devicemetrics(iface, metrics, tags)
|
def load(self):
    """Load the prefixes that are available in the workdir.

    Returns:
        None

    Raises:
        MalformedWorkdir: if the workdir is malformed
    """
    if self.loaded:
        LOGGER.debug('Already loaded')
        return
    try:
        # Use the ``next()`` built-in instead of the Python-2-only
        # ``.next()`` method so this also works on Python 3.
        basepath, dirs, _ = next(os.walk(self.path))
    except StopIteration:
        raise MalformedWorkdir('Empty dir %s' % self.path)
    full_path = partial(os.path.join, basepath)
    found_current = False
    for dirname in dirs:
        if dirname == 'current' and os.path.islink(full_path('current')):
            # 'current' is the expected soft link to the active prefix.
            self.current = os.path.basename(os.readlink(full_path('current')))
            found_current = True
            continue
        elif dirname == 'current':
            raise MalformedWorkdir('"%s/current" should be a soft link' % self.path)
        self.prefixes[dirname] = self.prefix_class(prefix=self.join(dirname))
    if not found_current:
        raise MalformedWorkdir('"%s/current" should exist and be a soft link' % self.path)
    self._update_current()
|
def update_checksum(self, progress_callback=None, chunk_size=None, checksum_kwargs=None, **kwargs):
    """Update checksum based on file.

    Recomputes ``self.checksum`` by delegating to the storage backend's
    ``checksum()`` method.

    :param progress_callback: Optional callable forwarded to the backend
        to report progress.
    :param chunk_size: Optional read chunk size forwarded to the backend.
    :param checksum_kwargs: Optional dict of extra keyword arguments for
        the backend's ``checksum()`` call.
    :param kwargs: Passed through to ``self.storage(...)`` when obtaining
        the storage backend.
    """
    self.checksum = self.storage(**kwargs).checksum(progress_callback=progress_callback, chunk_size=chunk_size, **(checksum_kwargs or {}))
|
def _validate_name(name):
    '''Checks if the provided name fits Linode's labeling parameters.

    .. versionadded:: 2015.5.6

    name
        The VM name to validate
    '''
    name = six.text_type(name)
    label_re = re.compile(r'^[a-zA-Z0-9][A-Za-z0-9_-]*[a-zA-Z0-9]$')
    valid = 3 <= len(name) <= 48 and re.match(label_re, name) is not None
    if not valid:
        log.warning(
            'A Linode label may only contain ASCII letters or numbers, dashes, and '
            'underscores, must begin and end with letters or numbers, and be at least '
            'three characters in length.'
        )
    return valid
|
def save_function_effect(module):
    """Recursively record the function effect of each pythonic intrinsic."""
    for entry in module.values():
        if isinstance(entry, dict):
            # Submodule case: descend into it.
            save_function_effect(entry)
            continue
        IntrinsicArgumentEffects[entry] = FunctionEffects(entry)
        if isinstance(entry, intrinsic.Class):
            save_function_effect(entry.fields)
|
def handle_program_options():
    """Uses the built-in argparse module to handle command-line options for the
    program.

    :return: The gathered command-line options specified by the user
    :rtype: argparse.ArgumentParser
    """
    # argparse collapses whitespace in help/description text, so the
    # backslash-continued strings below render as normal paragraphs.
    parser = argparse.ArgumentParser(description="Convert Sanger-sequencing \
derived data files for use with the \
metagenomics analysis program QIIME, by \
extracting Sample ID information, adding\
barcodes and primers to the sequence \
data, and outputting a mapping file and\
single FASTA-formatted sequence file \
formed by concatenating all input data.")
    # Required: directory holding the input sequence data files.
    parser.add_argument('-i', '--input_dir', required=True,
                        help="The directory containing sequence data files. \
Assumes all data files are placed in this \
directory. For files organized within folders by\
sample, use -s in addition.")
    parser.add_argument('-m', '--map_file', default='map.txt',
                        help="QIIME-formatted mapping file linking Sample IDs \
with barcodes and primers.")
    parser.add_argument('-o', '--output', default='output.fasta', metavar='OUTPUT_FILE',
                        help="Single file containing all sequence data found \
in input_dir, FASTA-formatted with barcode and \
primer preprended to sequence. If the -q option \
is passed, any quality data will also be output \
to a single file of the same name with a .qual \
extension.")
    parser.add_argument('-b', '--barcode_length', type=int, default=12,
                        help="Length of the generated barcode sequences. \
Default is 12 (QIIME default), minimum is 8.")
    parser.add_argument('-q', '--qual', action='store_true', default=False,
                        help="Instruct the program to look for quality \
input files")
    parser.add_argument('-u', '--utf16', action='store_true', default=False,
                        help="UTF-16 encoded input files")
    parser.add_argument('-t', '--treatment',
                        help="Inserts an additional column into the mapping \
file specifying some treatment or other variable\
 that separates the current set of sequences \
from any other set of seqeunces. For example:\
 -t DiseaseState=healthy")
    # data input options
    # Exactly one way of determining the Sample ID must be chosen.
    sidGroup = parser.add_mutually_exclusive_group(required=True)
    sidGroup.add_argument('-d', '--identifier_pattern', action=ValidateIDPattern,
                          nargs=2, metavar=('SEPARATOR', 'FIELD_NUMBER'),
                          help="Indicates how to extract the Sample ID from \
the description line. Specify two things: \
1. Field separator, 2. Field number of Sample \
ID (1 or greater). If the separator is a space \
or tab, use \s or \\t respectively. \
Example: >ka-SampleID-2091, use -i - 2, \
indicating - is the separator and the Sample ID\
 is field #2.")
    sidGroup.add_argument('-f', '--filename_sample_id', action='store_true',
                          default=False, help='Specify that the program should\
 the name of each fasta file as the Sample ID for use\
 in the mapping file. This is meant to be used when \
all sequence data for a sample is stored in a single\
 file.')
    return parser.parse_args()
|
def kill_raylet(self, check_alive=True):
    """Kill the raylet.

    Args:
        check_alive (bool): Raise an exception if the process was already
            dead.
    """
    # Delegates to the generic process killer with the raylet process type.
    self._kill_process_type(ray_constants.PROCESS_TYPE_RAYLET, check_alive=check_alive)
|
def init_user():
    """Create and populate the ~/.config/rapport directory tree if it's not existing.

    Doesn't interfere with already existing directories or configuration files.
    """
    def _announce(message, path):
        # Only chatty at verbosity >= 1; the action itself always happens.
        if rapport.config.get_int("rapport", "verbosity") >= 1:
            print(message.format(path))

    if not os.path.exists(USER_CONFIG_DIR):
        _announce("Create user directory {0}", USER_CONFIG_DIR)
        os.makedirs(USER_CONFIG_DIR)
    for subdir in ["plugins", "reports", "templates/plugin", "templates/email", "templates/web"]:
        user_conf_subdir = os.path.join(USER_CONFIG_DIR, subdir)
        if not os.path.exists(user_conf_subdir):
            _announce("Create user directory {0}", user_conf_subdir)
            os.makedirs(user_conf_subdir)
        # Reports may contain sensitive data: force 0700 on the directory.
        if subdir == "reports" and (os.stat(user_conf_subdir).st_mode & 0o777) != 0o700:
            _announce("Set secure directory permissions for {0}", user_conf_subdir)
            os.chmod(user_conf_subdir, 0o700)
    if not os.path.exists(USER_CONFIG_FILE):
        _announce("Create user configuration {0}", USER_CONFIG_FILE)
        default_config = os.path.abspath(os.path.join(os.path.splitext(__file__)[0], "rapport.conf"))
        shutil.copyfile(default_config, USER_CONFIG_FILE)
    if (os.stat(USER_CONFIG_FILE).st_mode & 0o777) != 0o600:
        _announce("Set secure file permissions for {0}", USER_CONFIG_FILE)
        os.chmod(USER_CONFIG_FILE, 0o600)
|
def modify(self, entry_id, **kw):
    '''Incremental version of :meth:`update`: only the given keyword
    attributes are changed on the entry; all other attributes keep their
    current values.  Returns True on success, or raises an exception on
    failure.

    Note: the default implementation assumes fast and sequential access;
    it reads the current version, applies the new values, and calls
    :meth:`update`.
    '''
    entry = self.read(entry_id)
    for attr_name, attr_value in kw.items():
        setattr(entry, attr_name, attr_value)
    self.update(entry)
    return True
|
def symbol_scores(self, symbol):
    """Find matches for symbol.

    :param symbol: A . separated symbol. eg. 'os.path.basename'
    :returns: A list of tuples of (score, package, reference|None),
        ordered by score from highest to lowest.
    """
    scores = []
    path = []

    # sys.path          sys      path     -> import sys
    # os.path.basename  os.path  basename -> import os.path
    # basename          os.path  basename -> from os.path import basename
    # path.basename     os.path  basename -> from os import path
    def fixup(module, variable):
        # Shift leading components that do not match the start of ``symbol``
        # from the module path onto the variable side of the import.
        prefix = module.split('.')
        if variable is not None:
            prefix.append(variable)
        seeking = symbol.split('.')
        new_module = []
        while prefix and seeking[0] != prefix[0]:
            new_module.append(prefix.pop(0))
        if new_module:
            module, variable = '.'.join(new_module), prefix[0]
        else:
            variable = None
        return module, variable

    def score_walk(scope, scale):
        # NOTE(review): reads ``full_key`` defined after this closure but
        # before the first call; mutates the shared ``path``/``scores``.
        sub_path, score = self._score_key(scope, full_key)
        if score > 0.1:
            try:
                # A ``None`` entry splits the package path from the symbol
                # that should be imported from it.
                i = sub_path.index(None)
                sub_path, from_symbol = sub_path[:i], '.'.join(sub_path[i + 1:])
            except ValueError:
                from_symbol = None
            package_path = '.'.join(path + sub_path)
            package_path, from_symbol = fixup(package_path, from_symbol)
            scores.append((score * scale, package_path, from_symbol))
        for key, subscope in scope._tree.items():
            if type(subscope) is not float:
                path.append(key)
                # Deeper scopes contribute with a slightly decayed scale.
                score_walk(subscope, subscope.score * scale - 0.1)
                path.pop()

    full_key = symbol.split('.')
    score_walk(self, 1.0)
    scores.sort(reverse=True)
    return scores
|
def get_bounce_dump(bounce_id, api_key=None, secure=None, test=None, **request_args):
    '''Get the raw email dump for a single bounce.

    :param bounce_id: The bounce's id. Get the id with :func:`get_bounces`.
    :param api_key: Your Postmark API key. Required, if `test` is not `True`.
    :param secure: Use the https scheme for the Postmark API.
        Defaults to `True`.
    :param test: Use the Postmark Test API. Defaults to `False`.
    :param \\*\\*request_args: Keyword arguments to pass to
        :func:`requests.request`.
    :rtype: :class:`BounceDumpResponse`
    '''
    # Delegates to the module-level default bounce-dump endpoint instance.
    return _default_bounce_dump.get(bounce_id, api_key=api_key, secure=secure, test=test, **request_args)
|
def _add_jobs(self):
    """Instantiate and schedule every active configured job."""
    for params in self.jobs.values():
        if not params.active:
            continue
        # Replace the handler class with an instance bound to its params.
        params.handler = params.handler(params)
        self.sched.add_cron_job(params.handler.run, **params.schedule)
|
def authorized_handler(self, f):
    """Decorator for the route that serves as the GitHub authorization
    callback.  The callback URL can be set in the settings for the app or
    passed in during authorization."""
    @wraps(f)
    def decorated(*args, **kwargs):
        has_code = 'code' in request.args
        data = self._handle_response() if has_code else self._handle_invalid_response()
        return f(*((data,) + args), **kwargs)
    return decorated
|
def shape(self) -> Tuple[int, int]:
    """Required shape of |NetCDFVariableAgg.array|.

    For the default configuration, the first axis corresponds to the
    number of devices, and the second one to the number of timesteps.
    We show this for the 1-dimensional input sequence |lland_fluxes.NKor|:

    >>> from hydpy.core.examples import prepare_io_example_1
    >>> nodes, elements = prepare_io_example_1()
    >>> from hydpy.core.netcdftools import NetCDFVariableAgg
    >>> ncvar = NetCDFVariableAgg('flux_nkor', isolate=False, timeaxis=1)
    >>> for element in elements:
    ...     ncvar.log(element.model.sequences.fluxes.nkor, None)
    >>> ncvar.shape
    (3, 4)

    When using the first axis as the "timeaxis", the order of |tuple|
    entries turns:

    >>> ncvar = NetCDFVariableAgg('flux_nkor', isolate=False, timeaxis=0)
    >>> for element in elements:
    ...     ncvar.log(element.model.sequences.fluxes.nkor, None)
    >>> ncvar.shape
    (4, 3)
    """
    # Axis ordering (time vs. place first) is decided by the helper.
    return self.sort_timeplaceentries(len(hydpy.pub.timegrids.init), len(self.sequences))
|
def register_composite(cls, name, handle=None, factory=None):
    """Maps a Postgresql type to this class.

    If the class's *table* attribute is empty, and the class has an
    attribute *pg_type* of tuple (schema, type), it is calculated and set
    by querying Postgres.  Register inherited/inheriting classes in
    heirarchical order.

    Every time a SQL function returns a registered type (including array
    elements and individual columns, recursively), this class will be
    instantiated automatically.  The object attributes will be passed to
    the provided callable in a form of keyword arguments.

    :param str name: the name of a PostgreSQL composite type, e.g. created
        using the *CREATE TYPE* command
    :param simpycity.handle.Handle handle:
    :param psycopg2.extras.CompositeCaster factory: use it to customize
        how to cast composite types
    :return: the registered *CompositeCaster* instance responsible for the
        conversion
    """
    class CustomCompositeCaster(psycopg2.extras.CompositeCaster):
        # Default factory: build a ``cls`` instance from the row values,
        # keyed by the composite type's attribute names.
        def make(self, values):
            d_out("CustomCompositeCaster.make: cls={0} values={1}".format(repr(cls), repr(values)))
            return cls(**dict(list(zip(self.attnames, values))))

    # Fetches the composite type's attribute names, in column order.
    PG_TYPE_SQL = """SELECT array_agg(attname)
    FROM
    (
        SELECT attname
        FROM
            pg_type t
            JOIN pg_namespace ns ON typnamespace = ns.oid
            JOIN pg_attribute a ON attrelid = typrelid
        WHERE nspname = %s AND typname = %s
            AND attnum > 0 AND NOT attisdropped
        ORDER BY attnum
    ) sub;"""
    if handle is None:
        handle = g_config.handle_factory()
    d_out("SimpleModel.register_composite: before: table for {0} is {1}".format(repr(cls.pg_type), cls.table))
    if cls.pg_type is not None:
        # The nearest base class's ``table`` (if any): used to detect that
        # this class has not yet defined its own column list.
        super_table = cls.__mro__[1].table if hasattr(cls.__mro__[1], 'table') else []
        if cls.table == [] or cls.table is super_table:
            cursor = handle.cursor()
            cursor.execute(PG_TYPE_SQL, cls.pg_type)
            row = cursor.fetchone()
            d_out("SimpleModel.register_composite: row={0}".format(row))
            # 'base_' is an internal artifact, not a real column.
            row[0] = [_ for _ in row[0] if _ != 'base_']
            cls.table = cls.table + row[0]
    d_out("SimpleModel.register_composite: after: table for {0} is {1}".format(repr(cls.pg_type), cls.table))
    if factory is None:
        factory = CustomCompositeCaster
    if sys.version_info[0] < 3:
        # psycopg2 on Python 2 requires a byte-string type name.
        name = str(name)
    return psycopg2.extras.register_composite(name, handle.conn,
                                              globally=True,  # in case of reconnects
                                              factory=factory)
|
def mutationhash(strings, nedit):
    """Produce a hash whose keys are every nedit-distance substitution of
    each input string, mapping to the set of original strings the
    substitution could have come from."""
    longest = max(len(s) for s in strings)
    indexes = generate_idx(longest, nedit)
    muthash = defaultdict(set)
    for original in strings:
        muthash[original].add(original)
        for mutated in substitution_set(original, indexes):
            muthash[mutated].add(original)
    return muthash
|
def parse_quotes(cmd, quotes=True, string=True):
    """Split ``cmd`` into arguments, honouring shell quoting when asked."""
    import shlex
    try:
        if quotes:
            args = shlex.split(cmd)
        else:
            args = cmd.split()
    except ValueError as exception:
        # Unbalanced quotes etc.: report and give back nothing.
        logger.error(exception)
        return []
    if not string:
        return args
    return [str(arg) for arg in args]
|
def OSLibraries(self):
    """Microsoft Windows SDK Libraries"""
    if self.vc_ver <= 10.0:
        # Legacy SDK layout: a single Lib<arch> directory.
        arch_subdir = self.pi.target_dir(hidex86=True, x64=True)
        return [os.path.join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)]
    arch_subdir = self.pi.target_dir(x64=True)
    lib_dir = os.path.join(self.si.WindowsSdkDir, 'lib')
    return [os.path.join(lib_dir, '%sum%s' % (self._sdk_subdir, arch_subdir))]
|
def getWinner(self, type='activation'):
    """Returns the winner of the type specified {'activation' or
    'target'} as a (position, value, average) tuple."""
    if type == 'activation':
        source = self.activation
    elif type == 'target':
        # note that backprop() resets self.targetSet flag
        if self.verify and self.targetSet == 0:
            raise LayerError('getWinner() called with \'target\' but target has not been set.', self.targetSet)
        source = self.target
    else:
        raise LayerError('getWinner() called with unknown layer attribute.', type)
    ttlvalue = Numeric.add.reduce(source)
    maxpos = Numeric.argmax(source)
    maxvalue = source[maxpos]
    if self.size > 0:
        avgvalue = ttlvalue / float(self.size)
    else:
        raise LayerError('getWinner() called for layer of size zero.', self.size)
    return maxpos, maxvalue, avgvalue
|
def join(self):
    """Wait for root state to finish execution.

    Blocks until the root state's run thread terminates, then closes the
    most recent execution history log (if one is being written) and marks
    the root state inactive.
    """
    self._root_state.join()
    # execution finished, close execution history log file (if present)
    if len(self._execution_histories) > 0:
        if self._execution_histories[-1].execution_history_storage is not None:
            set_read_and_writable_for_all = global_config.get_config_value("EXECUTION_LOG_SET_READ_AND_WRITABLE_FOR_ALL", False)
            self._execution_histories[-1].execution_history_storage.close(set_read_and_writable_for_all)
    # NOTE(review): local import — presumably avoids a circular dependency
    # at module load time; confirm against module layout.
    from rafcon.core.states.state import StateExecutionStatus
    self._root_state.state_execution_status = StateExecutionStatus.INACTIVE
|
def do_exists(self, params):
    """\x1b[1mNAME\x1b[0m
    exists - Gets the znode's stat information

\x1b[1mSYNOPSIS\x1b[0m
    exists <path> [watch] [pretty_date]

\x1b[1mOPTIONS\x1b[0m
    * watch: set a (data) watch on the path (default: false)

\x1b[1mEXAMPLES\x1b[0m
    > exists /foo
    Stat(
        czxid=101,
        mzxid=102,
        ctime=1382820644375,
        mtime=1382820693801,
        version=1,
        cversion=0,
        aversion=0,
        ephemeralOwner=0,
        dataLength=6,
        numChildren=0,
        pzxid=101
    )

    # sets a watch
    > exists /foo true

    # trigger the watch
    > rm /foo
    WatchedEvent(type='DELETED', state='CONNECTED', path=u'/foo')
    """
    # Echo any watch event back to the user as it fires.
    watcher = lambda evt: self.show_output(str(evt))
    kwargs = {"watch": watcher} if params.watch else {}
    pretty = params.pretty_date
    path = self.resolve_path(params.path)
    stat = self._zk.exists(path, **kwargs)
    if stat:
        # ephemeralOwner is falsy for persistent nodes; print 0 then.
        session = stat.ephemeralOwner if stat.ephemeralOwner else 0
        self.show_output("Stat(")
        self.show_output(" czxid=0x%x", stat.czxid)
        self.show_output(" mzxid=0x%x", stat.mzxid)
        self.show_output(" ctime=%s", time.ctime(stat.created) if pretty else stat.ctime)
        self.show_output(" mtime=%s", time.ctime(stat.last_modified) if pretty else stat.mtime)
        self.show_output(" version=%s", stat.version)
        self.show_output(" cversion=%s", stat.cversion)
        self.show_output(" aversion=%s", stat.aversion)
        self.show_output(" ephemeralOwner=0x%x", session)
        self.show_output(" dataLength=%s", stat.dataLength)
        self.show_output(" numChildren=%s", stat.numChildren)
        self.show_output(" pzxid=0x%x", stat.pzxid)
        self.show_output(")")
    else:
        self.show_output("Path %s doesn't exist", params.path)
|
def between(min_value, max_value):
    'Numerical values limit'
    message = N_('value should be between %(min)d and %(max)d') % dict(min=min_value, max=max_value)

    @validator(message)
    def wrapper(conv, value):
        # A missing value is acceptable here; requiredness is checked elsewhere.
        if value is None:
            return True
        return min_value <= value <= max_value
    return wrapper
|
def _htpasswd(username, password, **kwargs):
    '''Provide authentication via Apache-style htpasswd files'''
    from passlib.apache import HtpasswdFile
    pwfile = HtpasswdFile(kwargs['filename'])
    # passlib below version 1.6 uses 'verify' function instead of 'check_password'
    old_passlib = salt.utils.versions.version_cmp(kwargs['passlib_version'], '1.6') < 0
    checker = pwfile.verify if old_passlib else pwfile.check_password
    return checker(username, password)
|
def template(*args, **kwargs):
    '''Get a rendered template as a string iterator.
    You can use a name, a filename or a template string as first parameter.
    Template rendering arguments can be passed as dictionaries
    or directly (as keyword arguments).
    '''
    tpl = args[0] if args else None
    template_adapter = kwargs.pop('template_adapter', SimpleTemplate)
    # (Re)build the cached template when unseen, or always in DEBUG mode.
    if tpl not in TEMPLATES or DEBUG:
        settings = kwargs.pop('template_settings', {})
        lookup = kwargs.pop('template_lookup', TEMPLATE_PATH)
        if isinstance(tpl, template_adapter):
            # Already an adapter instance: cache it as-is.
            TEMPLATES[tpl] = tpl
            if settings:
                TEMPLATES[tpl].prepare(**settings)
        elif "\n" in tpl or "{" in tpl or "%" in tpl or '$' in tpl:
            # Looks like inline template source rather than a name/filename.
            TEMPLATES[tpl] = template_adapter(source=tpl, lookup=lookup, **settings)
        else:
            TEMPLATES[tpl] = template_adapter(name=tpl, lookup=lookup, **settings)
    if not TEMPLATES[tpl]:
        abort(500, 'Template (%s) not found' % tpl)
    # Remaining positional args are dicts merged into the keyword args.
    for dictarg in args[1:]:
        kwargs.update(dictarg)
    return TEMPLATES[tpl].render(kwargs)
|
def replace_if_changed(localfile, jottapath, JFS):
    """Compare md5 hashes to decide whether to replace the remote file.

    Uploads ``localfile`` and replaces the JottaCloud copy if the md5s
    differ, or resumes the upload if the remote copy is incomplete.
    Returns the JottaFile object.
    """
    jf = JFS.getObject(jottapath)
    # try to read previous hash, stored in xattr
    lf_hash = getxattrhash(localfile)
    if lf_hash is None:
        # no valid hash found in xattr: (re)calculate it from the contents
        with open(localfile) as lf:
            lf_hash = calculate_md5(lf)
    if type(jf) == JFSIncompleteFile:
        log.debug("Local file %s is incompletely uploaded, continue", localfile)
        return resume(localfile, jf, JFS)
    if jf.md5 == lf_hash:
        # hashes are the same: keep the version from jottacloud
        log.debug("hash match (%s), file contents haven't changed", lf_hash)
        setxattrhash(localfile, lf_hash)
        return jf
    setxattrhash(localfile, lf_hash)
    return new(localfile, jottapath, JFS)
|
def keys(self, history=None):
    """Get the set of I{all} property names.

    @param history: A history of nodes checked to prevent
        circular hunting.
    @type history: [L{Properties},..]
    @return: A set of property names.
    @rtype: list
    """
    if history is None:
        history = []
    history.append(self)
    names = set(self.definitions.keys())
    for link in self.links:
        if link not in history:
            names.update(link.keys(history))
    history.remove(self)
    return names
|
def childgroup(self, field):
    """Return a list of fields stored by row regarding the configured grid.

    :param field: The original field this widget is attached to
    """
    grid = getattr(self, "grid", None)
    if grid is not None:
        return self._childgroup(field.children, grid)
    named_grid = getattr(self, "named_grid", None)
    if named_grid is not None:
        return self._childgroup_by_name(field.children, named_grid)
    raise AttributeError(u"Missing the grid or named_grid argument")
|
def quit_banner(self) -> None:
    """Print a closing banner with a timestamp."""
    rule = "=" * 80
    print(rule)
    print("Done.")
    print(dtm.datetime.now().strftime("%Y/%m/%d - %H:%M:%S"))
    print(rule)
|
from typing import List
from collections import Counter
def count_element_frequency(elements: List[int]) -> dict:
    """Return a mapping from each element to its number of occurrences.

    Keys appear in order of first occurrence, e.g.
    ``count_element_frequency([10, 10, 20])`` gives ``{10: 2, 20: 1}``.
    The result is a ``collections.Counter`` (a ``dict`` subclass).
    """
    frequency = Counter()
    for element in elements:
        frequency[element] += 1
    return frequency
|
def to_plain_text(str):
    '''Return a plain-text version of a given string.

    A dumb two-pass approach: first drop markup tags, then drop entity
    markers. Fine for biocyc content where entities are &beta; etc. —
    stripping this way yields plaintext 'beta', which is preferable to
    unicode.
    '''
    # NOTE: the parameter name shadows the builtin `str`; kept for
    # backward compatibility with existing callers.
    without_tags = strip_tags_re.sub('', str)
    return strip_entities_re.sub('', without_tags)
|
def connect_delete_namespaced_pod_proxy(self, name, namespace, **kwargs):  # noqa: E501
    """connect DELETE requests to proxy of Pod  # noqa: E501

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request, in which case the request thread is returned instead:

        >>> thread = api.connect_delete_namespaced_pod_proxy(name, namespace, async_req=True)
        >>> result = thread.get()

    :param async_req bool
    :param str name: name of the PodProxyOptions (required)
    :param str namespace: object name and auth scope, such as for teams and projects (required)
    :param str path: Path is the URL path to use for the current proxy request to pod.
    :return: str (or the request thread when called asynchronously)
    """
    # Whether sync or async, the *_with_http_info helper does the work and
    # its return value (data or thread) is handed straight back.
    kwargs['_return_http_data_only'] = True
    return self.connect_delete_namespaced_pod_proxy_with_http_info(name, namespace, **kwargs)  # noqa: E501
|
def _keyword_expander(word, language, lemmatized=False, threshold=0.70):
    """Find similar terms in Word2Vec models.

    Accepts a string and returns a list of terms of at least *threshold*
    similarity.

    :rtype: list
    """
    # Import lazily: cltk (and its models) are heavy optional dependencies.
    try:
        from cltk.vector.word2vec import get_sims
    except ImportError as imp_err:
        print(imp_err)
        raise
    return get_sims(word, language, lemmatized=lemmatized, threshold=threshold)
|
def get_catalog_metadata(catalog, exclude_meta_fields=None):
    """Return only the catalog-level metadata.

    The ``"dataset"`` entry and any field named in *exclude_meta_fields*
    are removed from a shallow copy of *catalog*; the original dict is
    never mutated.

    :param catalog: dict with catalog metadata (typically containing a
        ``"dataset"`` list)
    :param exclude_meta_fields: optional list of additional top-level
        keys to drop
    :return: new dict with the catalog-level metadata only
    """
    exclude_meta_fields = exclude_meta_fields or []
    meta = catalog.copy()
    # pop() with a default instead of `del`: don't crash on catalogs
    # that have no "dataset" key at all
    meta.pop("dataset", None)
    for excluded_meta_field in exclude_meta_fields:
        meta.pop(excluded_meta_field, None)
    return meta
|
def setdefault(self, sid, value, dtype=F64):
    """Works like `dict.setdefault`: if the `sid` key is missing, fill it
    with a constant array and return the associated ProbabilityCurve.

    :param sid: site ID
    :param value: value used to fill the returned ProbabilityCurve
    :param dtype: dtype used internally (F32 or F64)
    """
    try:
        return self[sid]
    except KeyError:
        pass
    # build a (shape_y, shape_z) array filled with `value` in one call
    filled = numpy.full((self.shape_y, self.shape_z), value, dtype)
    curve = ProbabilityCurve(filled)
    self[sid] = curve
    return curve
|
def compiled_init_func(self):
    """Return the compiled __init__ function source for this model.

    Renders one assignment line per column plus the matching argument
    list, then substitutes both into the init-function template.
    """
    # each assignment sits on its own line at double-tab depth
    separator = "\n" + self.tab + self.tab
    assignments = separator.join(
        ALCHEMY_TEMPLATES.col_assignment.safe_substitute(col_name=col)
        for col in self.columns)
    args = ", ".join(
        ALCHEMY_TEMPLATES.func_arg.safe_substitute(arg_name=col)
        for col in self.columns)
    return ALCHEMY_TEMPLATES.init_function.safe_substitute(
        col_assignments=assignments, init_args=args)
|
def process_entries(self, omimids, transform, included_fields=None, graph=None, limit=None, globaltt=None):
    """Fetch OMIM entries via the OMIM API, optionally transforming each.

    Given a list of omim ids, this will use the omim API to fetch the
    entries, according to the ``included_fields`` passed as a parameter.

    If a transformation function is supplied, this will iterate over each
    entry, and either add the results to the supplied ``graph`` or will
    return a set of processed entries that the calling function can
    further iterate.

    If no ``included_fields`` are provided, this will simply fetch the basic
    entry from omim, which includes an entry's: prefix, mimNumber, status,
    and titles.

    :param omimids: the set of omim entry ids to fetch using their API
    :param transform: function to transform each omim entry when looping
    :param included_fields: a set of what fields are required to retrieve
        from the API
    :param graph: the graph to add the transformed data into
    :param limit: optional cap on the number of ids fetched
        (only honored outside test mode)
    :param globaltt: global translation table, passed through to ``transform``
    :return: list of the non-None values returned by ``transform``
    """
    omimparams = {}
    # add the included_fields as parameters
    if included_fields is not None and included_fields:
        omimparams['include'] = ','.join(included_fields)
    processed_entries = list()
    # scrub any omim prefixes (e.g. "OMIM:123456") from the ids before
    # processing; the API wants bare mim numbers
    cleanomimids = [o.split(':')[-1] for o in omimids]
    diff = set(omimids) - set(cleanomimids)
    if diff:
        # some ids carried a prefix: log them and continue with clean ids
        LOG.warning('OMIM has %i dirty bits see"\n %s', len(diff), str(diff))
        omimids = cleanomimids
    else:
        cleanomimids = list()
    acc = 0
    # for counting
    # note that you can only do request batches of 20
    # see info about "Limits" at http://omim.org/help/api
    # TODO 2017 May seems a majority of many groups of 20
    # are producing python None for RDF triple Objects
    groupsize = 20
    if not self.test_mode and limit is not None:
        # just in case the limit is larger than the number of records,
        maxit = limit
        if limit > len(omimids):
            maxit = len(omimids)
    else:
        maxit = len(omimids)
    while acc < maxit:
        end = min((maxit, acc + groupsize))
        # iterate through the omim ids list,
        # and fetch from the OMIM api in batches of 20
        if self.test_mode:
            intersect = list(set([str(i) for i in self.test_ids]) & set(omimids[acc:end]))
            # some of the test ids are in the omimids
            if intersect:
                LOG.info("found test ids: %s", intersect)
                omimparams.update({'mimNumber': ','.join(intersect)})
            else:
                # nothing from this batch is under test: skip it entirely
                acc += groupsize
                continue
        else:
            omimparams.update({'mimNumber': ','.join(omimids[acc:end])})
        url = OMIMAPI + urllib.parse.urlencode(omimparams)
        try:
            req = urllib.request.urlopen(url)
        except HTTPError as e:  # URLError?
            LOG.warning('fetching: %s', url)
            error_msg = e.read()
            # an invalid API key will never recover: abort loudly
            if re.search(r'The API key: .* is invalid', str(error_msg)):
                msg = "API Key not valid"
                raise HTTPError(url, e.code, msg, e.hdrs, e.fp)
            # any other HTTP failure stops the remaining batches
            LOG.error("Failed with: %s", str(error_msg))
            break
        resp = req.read().decode()
        acc += groupsize
        myjson = json.loads(resp)
        # snag a copy of the raw response for debugging / provenance
        with open('./raw/omim/_' + str(acc) + '.json', 'w') as fp:
            json.dump(myjson, fp)
        entries = myjson['omim']['entryList']
        for e in entries:
            # apply the data transformation, and save it to the graph
            processed_entry = transform(e, graph, globaltt)
            if processed_entry is not None:
                processed_entries.append(processed_entry)
        # ### end iterating over batch of entries
    return processed_entries
|
def weighted_random(sample, embedding):
    """Determine source-variable values by weighted random choice.

    Args:
        sample (dict): A sample of the form {v: val, ...} where v is a
            variable in the target graph and val is the associated value
            as determined by a binary quadratic model sampler.
        embedding (dict): The mapping from the source graph to the target
            graph. Should be of the form {v: {s, ...}, ...} where v is a
            node in the source graph and s is a node in the target graph.

    Yields:
        dict: The unembedded sample. When there is a chain break, the
        value is chosen randomly, weighted by the frequency of the values
        within the chain.
    """
    unembedded = {}
    for source_var, chain in embedding.items():
        chain_values = [sample[target_var] for target_var in chain]
        # a uniform pick over *all* chain values is exactly a choice
        # weighted by each value's frequency within the chain
        unembedded[source_var] = random.choice(chain_values)
    yield unembedded
|
def insertFile(self, qInserts=False):
    """API to insert a list of file into DBS in DBS. Up to 10 files can be inserted in one request.

    :param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts, within few minutes.
    :type qInserts: bool
    :param filesList: List of dictionaries containing following information
    :type filesList: list of dicts
    :key logical_file_name: File to be inserted (str) (Required)
    :key is_file_valid: (optional, default = 1): (bool)
    :key block: required: /a/b/c#d (str)
    :key dataset: required: /a/b/c (str)
    :key file_type: (optional, default = EDM) one of the predefined types, (str)
    :key check_sum: (optional, default = '-1') (str)
    :key event_count: (optional, default = -1) (int)
    :key file_size: (optional, default = -1.) (float)
    :key adler32: (optional, default = '') (str)
    :key md5: (optional, default = '') (str)
    :key auto_cross_section: (optional, default = -1.) (float)
    :key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....]
    :key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
    :key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
    :key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....]
    """
    # the HTTP layer may deliver the flag as the *string* 'False'
    if qInserts in (False, 'False'):
        qInserts = False
    try:
        body = request.body.read()
        indata = cjson.decode(body)["files"]
        if not isinstance(indata, (list, dict)):
            dbsExceptionHandler("dbsException-invalid-input", "Invalid Input DataType", self.logger.exception, "insertFile expects input as list or dirc")
        businput = []
        # a single dict is normalized to a one-element list
        if isinstance(indata, dict):
            indata = [indata]
        indata = validateJSONInputNoCopy("files", indata)
        for f in indata:
            # fill in server-side defaults (timestamps, authorship) and
            # normalize the optional list-valued fields to empty lists.
            # NOTE(review): "file_assoc_list" is populated from the key
            # "assoc_list" (not "file_assoc_list") — confirm intended.
            f.update({
                "creation_date": f.get("creation_date", dbsUtils().getTime()),
                "create_by": dbsUtils().getCreateBy(),
                "last_modification_date": f.get("last_modification_date", dbsUtils().getTime()),
                "last_modified_by": f.get("last_modified_by", dbsUtils().getCreateBy()),
                "file_lumi_list": f.get("file_lumi_list", []),
                "file_parent_list": f.get("file_parent_list", []),
                "file_assoc_list": f.get("assoc_list", []),
                "file_output_config_list": f.get("file_output_config_list", [])})
            businput.append(f)
        self.dbsFile.insertFile(businput, qInserts)
    except cjson.DecodeError as dc:
        # request body was not valid JSON
        dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert File input", self.logger.exception, str(dc))
    except dbsException as de:
        dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
    except HTTPError as he:
        # propagate HTTP errors unchanged to the web layer
        raise he
    except Exception as ex:
        sError = "DBSWriterModel/insertFile. %s\n. Exception trace: \n %s" % (ex, traceback.format_exc())
        dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
def parse_cstring(stream, offset):
    """Parse a NUL-terminated string from *stream* starting at *offset*.

    Bytes are accumulated until the terminating ``b'\\x00'`` and decoded
    as UTF-8 in a single pass, so multi-byte characters are handled
    correctly (the previous byte-at-a-time decode could not).

    :param stream: binary file-like object supporting ``seek``/``read``
    :param offset: absolute position of the string's first byte
    :return: the decoded ``str`` (terminator not included)
    :raises ValueError: if the stream ends before a NUL terminator
    """
    stream.seek(offset)
    raw = bytearray()
    while True:
        byte = stream.read(1)
        if not byte:
            # EOF before the terminator: the string is truncated
            raise ValueError("unterminated C string at offset %d" % offset)
        if byte == b'\x00':
            return raw.decode('utf-8')
        raw += byte
|
def earth_orientation(date):
    """Earth orientation as a rotating matrix."""
    # pole coordinates and s' come back in degrees; convert once
    angles = np.deg2rad(_earth_orientation(date))
    x_p, y_p, s_prime = angles
    return rot3(-s_prime) @ rot2(x_p) @ rot1(y_p)
|
def validate(self, body, params=None):
    """Validate an anomaly-detection job configuration.

    :arg body: The job config
    :raises ValueError: when *body* is empty/missing
    """
    endpoint = "/_ml/anomaly_detectors/_validate"
    if body in SKIP_IN_PATH:
        raise ValueError("Empty value passed for a required argument 'body'.")
    return self.transport.perform_request("POST", endpoint, params=params, body=body)
|
def _process_info(raw_info: VideoInfo) -> VideoInfo:
    """Process raw information about the video (parse date, etc.)."""
    # raw date looks like '2018-04-05 17:00'
    parsed_date = datetime.strptime(raw_info.date, '%Y-%m-%d %H:%M')
    return raw_info._replace(date=parsed_date)
|
def unpack_nested_exception(error):
    """If exceptions are stacked, return the innermost one.

    Scans ``args`` of the exception; whenever an embedded exception is
    found, descends into it and rescans from the start.

    :param error: a Python exception, possibly with exceptions embedded
        within its ``args``
    :return: the deepest embedded exception (or *error* itself)
    """
    current = error
    index = 0
    while index < len(current.args):
        candidate = current.args[index]
        if isinstance(candidate, Exception):
            # descend and restart the scan on the inner exception's args
            current = candidate
            index = 0
        else:
            index += 1
    return current
|
def set_result(self, msg, valid=True, overwrite=False):
    """Set result string and validity.

    :param msg: the result message (expected to be a unicode string)
    :param valid: whether this result marks the check as valid
    :param overwrite: permit replacing an already-stored result silently
    """
    if self.has_result and not overwrite:
        # A result is already present and overwriting wasn't requested:
        # only warn — note that the assignments below still replace it.
        log.warn(LOG_CHECK, "Double result %r (previous %r) for %s", msg, self.result, self)
    else:
        self.has_result = True
        # sanity-check the message before accepting it
        if not isinstance(msg, unicode):
            log.warn(LOG_CHECK, "Non-unicode result for %s: %r", self, msg)
        elif not msg:
            log.warn(LOG_CHECK, "Empty result for %s", self)
    # the result is stored in every case (even the "double result" branch)
    self.result = msg
    self.valid = valid
    # free content data
    self.data = None
|
def load_dataset(date):
    """Load the dataset for a single date.

    Parameters
        date: the date (string) for which to load the data & dataset
    Returns
        ds: the dataset object
    """
    lab_dir = "/home/annaho/TheCannon/data/lamost"
    wl_dir = "/home/annaho/TheCannon/code/lamost/mass_age/cn"
    spec_dir = "/home/annaho/TheCannon/code/apogee_lamost/xcalib_4labels/output"
    # first 3626 wavelength columns only (no cols)
    wl = np.load(wl_dir + "/wl_cols.npz")['arr_0'][0:3626]
    ds = dataset.Dataset(wl, [], [], [], [], [], [], [])
    ds.test_label_vals = np.load("%s/%s_all_cannon_labels.npz" % (lab_dir, date))['arr_0']
    normalized = np.load("%s/%s_norm.npz" % (spec_dir, date))
    ds.test_flux = normalized['arr_0']
    ds.test_ivar = normalized['arr_1']
    ds.test_ID = np.load("%s/%s_ids.npz" % (spec_dir, date))['arr_0']
    return ds
|
def power_modulo_p(n: int, p: int) -> int:
    """Return 2**n modulo p.

    Args:
        n (int): The exponent to which 2 should be raised.
        p (int): The modulus against which the result is reduced.

    Returns:
        int: The value of 2**n mod p.

    Raises:
        ValueError: if *p* is zero.

    Examples:
        >>> power_modulo_p(3, 5)
        3
        >>> power_modulo_p(0, 101)
        1
        >>> power_modulo_p(3, 11)
        8
    """
    if not p:
        raise ValueError("Modulo by zero is undefined.")
    # three-argument pow() performs fast modular exponentiation
    return pow(2, n, p)
|
def set_field_value(self, field_name, value):
    """Set value of response field named ``field_name``.

    If the response holds a single item, that item's field is set; if it
    holds multiple items (under ``'data'``), every item is edited. To
    edit response meta (e.g. 'count') edit ``event.response`` directly.

    :param field_name: Name of the response field to set.
    :param value: Value to be set.
    """
    if self.response is None:
        return
    # multi-item responses keep their items under the 'data' key
    targets = self.response['data'] if 'data' in self.response else [self.response]
    for entry in targets:
        entry[field_name] = value
|
def modify_symbol(sym: ast.Symbol, scope: ast.InstanceClass) -> None:
    """Apply a modification to a symbol if the scope matches (or is None).

    :param sym: symbol to apply modifications for
    :param scope: scope of modification
    """
    # We assume that we do not screw up the order of applying modifications
    # when "moving up" with the scope. Partition arguments into those that
    # apply at this scope and those to keep for later.
    apply_args = []
    skip_args = []
    for class_mod_argument in sym.class_modification.arguments:
        arg_scope = class_mod_argument.scope
        matches = arg_scope is None or arg_scope.full_reference().to_tuple() == scope.full_reference().to_tuple()
        (apply_args if matches else skip_args).append(class_mod_argument)
    for class_mod_argument in apply_args:
        argument = class_mod_argument.value
        assert isinstance(argument, ast.ElementModification), "Found redeclaration modification which should already have been handled."
        # TODO: Strip all non-symbol stuff.
        if argument.component.name not in ast.Symbol.ATTRIBUTES:
            raise Exception("Trying to set unknown symbol property {}".format(argument.component.name))
        setattr(sym, argument.component.name, argument.modifications[0])
    sym.class_modification.arguments = skip_args
|
def set_selected_pair(self, component, local_foundation, remote_foundation):
    """Force the selected candidate pair.

    If the remote party does not support ICE, you should use this
    instead of calling :meth:`connect`.
    """
    # locate the first protocol whose local candidate matches
    protocol = None
    for proto in self._protocols:
        local = proto.local_candidate
        if local.component == component and local.foundation == local_foundation:
            protocol = proto
            break
    # locate the remote candidate (last match wins, as before)
    remote_candidate = None
    for candidate in self._remote_candidates:
        if candidate.component == component and candidate.foundation == remote_foundation:
            remote_candidate = candidate
    assert (protocol and remote_candidate)
    self._nominated[component] = CandidatePair(protocol, remote_candidate)
|
async def async_get_api_key(session, host, port, username=None, password=None, **kwargs):
    """Get a new API key for devicetype."""
    url = 'http://{host}:{port}/api'.format(host=host, port=str(port))
    # credentials are optional; only build basic auth when both are given
    auth = aiohttp.BasicAuth(username, password=password) if username and password else None
    data = b'{"devicetype": "pydeconz"}'
    response = await async_request(session.post, url, auth=auth, data=data)
    api_key = response[0]['success']['username']
    _LOGGER.info("API key: %s", api_key)
    return api_key
|
def _unload(self):
    """Unloads the plugin.

    :raises: errors.PluginUninitError
    """
    super(JB_MayaPlugin, self)._unload()
    if jukeboxmaya.STANDALONE_INITIALIZED:
        # standalone mode never initialized a UI, nothing to tear down
        return
    try:
        self.uninit_ui()
    except Exception:
        # UI teardown is best-effort; log but do not abort the unload
        log.exception("Unload Ui failed!")
|
def send(self, soapenv):
    """Send SOAP message.

    Depending on how the ``nosend`` & ``retxml`` options are set, may do
    one of the following:
      * Return a constructed web service operation request without sending
        it to the web service.
      * Invoke the web service operation and return its SOAP reply XML.
      * Invoke the web service operation, process its results and return
        the Python object representing the returned value.

    @param soapenv: A SOAP envelope to send.
    @type soapenv: L{Document}
    @return: SOAP request, SOAP reply or a web service return value.
    @rtype: L{RequestContext}|I{builtin}|I{subclass of} L{Object}|I{bytes}|
        I{None}
    """
    location = self.__location()
    log.debug("sending to (%s)\nmessage:\n%s", location, soapenv)
    plugins = PluginContainer(self.options.plugins)
    # let plugins inspect/modify the marshalled document before serializing
    plugins.message.marshalled(envelope=soapenv.root())
    if self.options.prettyxml:
        soapenv = soapenv.str()
    else:
        soapenv = soapenv.plain()
    soapenv = soapenv.encode("utf-8")
    # plugins may replace the serialized envelope entirely
    ctx = plugins.message.sending(envelope=soapenv)
    soapenv = ctx.envelope
    if self.options.nosend:
        # construct the request but leave the actual send to the caller
        return RequestContext(self.process_reply, soapenv)
    request = suds.transport.Request(location, soapenv)
    request.headers = self.__headers()
    try:
        # time the round-trip for metrics reporting
        timer = metrics.Timer()
        timer.start()
        reply = self.options.transport.send(request)
        timer.stop()
        metrics.log.debug("waited %s on server reply", timer)
    except suds.transport.TransportError, e:
        # transport-level failure: hand the error body to reply processing
        content = e.fp and e.fp.read() or ""
        return self.process_reply(content, e.httpcode, tostr(e))
    return self.process_reply(reply.message, None, None)
|
def delete(self, constraint):
    """Delete a record from the repository.

    :param constraint: dict with a raw SQL 'where' clause and its 'values'
    :return: number of records removed
    """
    matched = self._get_repo_filter(Service.objects).extra(
        where=[constraint['where']], params=constraint['values']).all()
    # count before deleting: the queryset is unusable afterwards
    count = len(matched)
    matched.delete()
    return count
|
def _invite(self, name, method, email, uuid, event, password=""):
    """Actually invite a given user."""
    # persist a fresh open enrollment record for this invitation
    enrollment = objectmodels['enrollment']({
        'uuid': std_uuid(),
        'status': 'Open',
        'name': name,
        'method': method,
        'email': email,
        'password': password,
        'timestamp': std_now(),
    })
    enrollment.save()
    self.log('Enrollment stored', lvl=debug)
    self._send_invitation(enrollment, event)
    # notify the requesting client that the invite went out
    packet = {
        'component': 'hfos.enrol.enrolmanager',
        'action': 'invite',
        'data': [True, email],
    }
    self.fireEvent(send(uuid, packet))
|
def resource_string(package_or_requirement, resource_name):
    """Similar to pkg_resources.resource_string, but resolves the resource
    through resource_filename (which also consults a predefined list of
    paths) and returns the file content.

    :param package_or_requirement: the module in which the resource resides
    :param resource_name: the name of the resource
    :return: the file content
    :rtype: str
    """
    path = resource_filename(package_or_requirement, resource_name)
    with open(path, 'r') as resource_file:
        return resource_file.read()
|
def find_related(self, fullname):
    """Return a sorted list of non-stdlib modules that are imported
    directly or indirectly by `fullname`, plus their parents.

    This method is like :py:meth:`find_related_imports`, but also
    recursively searches any modules which are imported by `fullname`.

    :param fullname: Fully qualified name of an _already imported_ module
        for which source code can be retrieved
    :type fullname: str
    """
    pending = [fullname]
    related = set()
    # breadth-first walk of the import graph
    while pending:
        current = pending.pop(0)
        imports = self.find_related_imports(current)
        # queue only names neither seen nor already queued
        pending.extend(set(imports).difference(related.union(pending)))
        related.update(imports)
    related.discard(fullname)
    return sorted(related)
|
def scatterAlign(seq1, seq2, window=7):
    """Visually align two sequences as a dot plot."""
    # index every window-sized substring to its start positions, per sequence
    d1 = defaultdict(list)
    d2 = defaultdict(list)
    for seq, section_dict in ((seq1, d1), (seq2, d2)):
        for start in range(len(seq) - window):
            section_dict[seq[start:start + window]].append(start)
    matches = set(d1).intersection(d2)
    print('%i unique matches' % len(matches))
    # one (x, y) point for every co-occurrence of a shared window
    x = []
    y = []
    for section in matches:
        for i in d1[section]:
            for j in d2[section]:
                x.append(i)
                y.append(j)
    plt.gray()
    plt.scatter(x, y)
    plt.xlim(0, len(seq1) - window)
    plt.ylim(0, len(seq2) - window)
    plt.xlabel('length %i bp' % (len(seq1)))
    plt.ylabel('length %i bp' % (len(seq2)))
    plt.title('Dot plot using window size %i\n(allowing no mis-matches)' % window)
    plt.show()
|
def local_asn(self, **kwargs):
    """Set BGP local ASN.

    Args:
        local_as (str): Local ASN of NOS device.
        vrf (str): The VRF for this BGP process.
        rbridge_id (str): The rbridge ID of the device on which BGP will be
            configured in a VCS fabric.
        get (bool): Get config instead of editing config. (True, False)
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `local_as` is not specified.

    Examples:
        >>> import pynos.device
        >>> conn = ('10.24.39.211', '22')
        >>> auth = ('admin', 'password')
        >>> with pynos.device.Device(conn=conn, auth=auth) as dev:
        ...     output = dev.bgp.local_asn(local_as='65535',
        ...     rbridge_id='225')
        ...     output = dev.bgp.local_asn(local_as='65535',
        ...     rbridge_id='225', get=True)
        ...     dev.bgp.local_asn() # doctest: +IGNORE_EXCEPTION_DETAIL
        Traceback (most recent call last):
        KeyError
    """
    vrf = kwargs.pop('vrf', 'default')
    is_get_config = kwargs.pop('get', False)
    if not is_get_config:
        # 'local_as' is mandatory when configuring; raises KeyError if absent
        local_as = kwargs.pop('local_as')
    else:
        local_as = ''
    rbridge_id = kwargs.pop('rbridge_id', '1')
    callback = kwargs.pop('callback', self._callback)
    bgp_args = dict(vrf_name=vrf, rbridge_id=rbridge_id)
    config = self._rbridge.rbridge_id_router_bgp_vrf_name(**bgp_args)
    if not is_get_config:
        # ensure the BGP process for this VRF exists before setting the ASN
        callback(config)
    local_as_args = dict(vrf_name=vrf, local_as=local_as, rbridge_id=rbridge_id)
    # the generated binding name is long; note the implicit string concat
    local_as = getattr(self._rbridge, 'rbridge_id_router_bgp_router_bgp_cmds_holder_' 'router_bgp_attributes_local_as')
    config = local_as(**local_as_args)
    if is_get_config:
        return callback(config, handler='get_config')
    return callback(config)
|
def _add_node ( self , idx , unique_idx , frac_coords ) :
"""Add information about a node describing a critical point .
: param idx : unique index
: param unique _ idx : index of unique CriticalPoint ,
used to look up more information of point ( field etc . )
: param frac _ coord : fractional co - ordinates of point
: return :"""
|
self . nodes [ idx ] = { 'unique_idx' : unique_idx , 'frac_coords' : frac_coords }
|
def extend_partial(self, times, obs_times, obs_losses, config=None):
    """Extend a partially observed curve.

    Parameters:
        times: numpy array — times where to predict the loss
        obs_times: numpy array — times where the curve has already been
            observed (currently unused by this model)
        obs_losses: numpy array — corresponding observed losses
            (currently unused by this model)
        config: numpy array — numerical representation of the config;
            None if no config information is available

    Returns:
        mean and variance prediction at input times
    """
    # this model ignores the partial observations and delegates directly
    return self.predict_unseen(times, config)
|
def indent(self, space=4):
    '''Return an indented Newick string, just like ``nw_indent`` in
    Newick Utilities.

    Args:
        ``space`` (``int``): The number of spaces a tab should equal

    Returns:
        ``str``: An indented Newick string
    '''
    if not isinstance(space, int):
        raise TypeError("space must be an int")
    if space < 0:
        raise ValueError("space must be a non-negative integer")
    pad = ' ' * space
    pieces = []
    depth = 0
    for ch in self.newick():
        if ch == '(':
            # open a clade: newline, then indent one level deeper
            depth += 1
            pieces.append('(\n' + pad * depth)
        elif ch == ')':
            # close a clade: newline, dedent, then the paren itself
            depth -= 1
            pieces.append('\n' + pad * depth + ')')
        elif ch == ',':
            pieces.append(',\n' + pad * depth)
        else:
            pieces.append(ch)
    return ''.join(pieces)
|
def list_plugins():
    '''List all the munin plugins

    Only entries whose owner-execute bit is set are returned; entries
    that cannot be stat'ed are skipped.

    CLI Example:

    .. code-block:: bash

        salt '*' munin.list_plugins
    '''
    ret = []
    for plugin in os.listdir(PLUGINDIR):
        statf = os.path.join(PLUGINDIR, plugin)
        try:
            mode = os.stat(statf)[stat.ST_MODE]
        except OSError:
            # previously `executebit` stayed unbound (NameError on the
            # first failure) or stale from the prior iteration — skip
            # unstat-able entries instead
            continue
        if stat.S_IXUSR & mode:
            ret.append(plugin)
    return ret
|
def map_permissions_check(view_func):
    """Decorator for URLs dealing with the map: enforces edit permission."""
    @wraps(view_func)
    def wrapper(request, *args, **kwargs):
        map_inst = get_object_or_404(Map, pk=kwargs['map_id'])
        # avoid re-requesting the map in the view
        kwargs['map_inst'] = map_inst
        user = request.user
        # can_edit is only consulted for restricted maps (short-circuit)
        if map_inst.edit_status >= map_inst.EDITORS and not map_inst.can_edit(user=user, request=request):
            if map_inst.owner and not user.is_authenticated:
                return simple_json_response(login_required=str(LOGIN_URL))
            return HttpResponseForbidden()
        return view_func(request, *args, **kwargs)
    return wrapper
|
def get_all_chunks_for_term(self, termid):
    """Yield all the chunks in which the term is contained.

    @type termid: string
    @param termid: the term identifier
    @rtype: generator
    @return: yields (chunk type, sorted list of subsumed term ids) pairs
    """
    terminal_id = self.terminal_for_term.get(termid)
    for path in self.paths_for_terminal[terminal_id]:
        for node in path:
            chunk_type = self.label_for_nonter[node]
            subsumed = self.terms_subsumed_by_nonter.get(node)
            # nodes that subsume no terms are not chunks
            if subsumed is not None:
                yield chunk_type, sorted(list(subsumed))
|
def _batch_load(project, workspace, headerline, entity_data, chunk_size=500):
    """Submit a large number of entity updates in batches of chunk_size.

    :param project: Firecloud billing project name
    :param workspace: workspace to load entities into
    :param headerline: loadfile header, e.g. "entity:sample_id\\t..."
    :param entity_data: list of tab-separated data lines (no header)
    :param chunk_size: number of lines uploaded per request
    :return: 0 on success, 1 when the header line is invalid
    """
    if fcconfig.verbosity:
        print("Batching " + str(len(entity_data)) + " updates to Firecloud...")
    # Parse the entity type from the first cell, e.g. "entity:sample_id"
    # First check that the header is valid
    if not _valid_headerline(headerline):
        eprint("Invalid loadfile header:\n" + headerline)
        return 1
    # "entitie" is deliberate: an "s" is appended by the {1}s format below
    update_type = "membership" if headerline.startswith("membership") else "entitie"
    etype = headerline.split('\t')[0].split(':')[1].replace("_id", "")
    # Split entity_data into chunks
    total = int(len(entity_data) / chunk_size) + 1
    batch = 0
    for i in range(0, len(entity_data), chunk_size):
        batch += 1
        if fcconfig.verbosity:
            print("Updating {0} {1}s {2}-{3}, batch {4}/{5}".format(etype, update_type, i + 1, min(i + chunk_size, len(entity_data)), batch, total))
        # every chunk is re-prefixed with the header so it is a valid loadfile
        this_data = headerline + '\n' + '\n'.join(entity_data[i:i + chunk_size])
        # Now push the entity data to firecloud
        r = fapi.upload_entities(project, workspace, this_data)
        fapi._check_response_code(r, 200)
    return 0
|
def filter_tasks(self, task_names, keep_dependencies=False):
    """Keep only the named tasks (plus, optionally, their dependencies).

    :param task_names: iterable of names of tasks to keep
    :param keep_dependencies: when True, every ordered dependency of a
        kept task is kept as well; when False, kept tasks have their
        dependency sets stripped.
    """
    new_tasks = {}
    for task_name in task_names:
        task = self.get_task(task_name)
        # membership must be tested against the *name* keys — the old
        # `task not in new_tasks` compared objects to names and was
        # always true
        if task.name not in new_tasks:
            new_tasks[task.name] = task
        if keep_dependencies:
            for dependency in task.ordered_dependencies():
                if dependency.name not in new_tasks:
                    new_tasks[dependency.name] = dependency
        else:
            # strip dependencies
            task.dependencies = set()
    self.tasks = new_tasks
|
def extract_subject_info_extension(cert_obj):
    """Extract DataONE SubjectInfo XML doc from certificate.

    Certificates issued by DataONE may include an embedded XML doc containing
    additional information about the subject specified in the certificate DN.
    If present, the doc is stored as an extension with an OID specified by
    DataONE and formatted as specified in the DataONE SubjectInfo schema
    definition.

    Args:
        cert_obj: cryptography.Certificate

    Returns:
        str: SubjectInfo XML doc if present, else None
    """
    try:
        oid = cryptography.x509.oid.ObjectIdentifier(DATAONE_SUBJECT_INFO_OID)
        extension = cert_obj.extensions.get_extension_for_oid(oid)
        decoded = pyasn1.codec.der.decoder.decode(extension.value.value)
        return str(decoded[0])
    except Exception as e:
        # best-effort: a missing/undecodable extension yields None
        logging.debug('SubjectInfo not extracted. reason="{}"'.format(e))
|
def validate_user_threaded_json(pjson):
    """Validate a parsed JSON dict of tests in the user-threaded format.

    ``pjson["tests"]`` must be a two-dimensional list of (unicode) strings,
    and an ``environments`` key must not be present.

    :param pjson: parsed JSON dict representing a set of tests
    :raises ParseError: if 'tests' is not a 2-D list or 'environments' exists
    :raises TypeError: if any test entry is not a unicode string
    """
    tests = pjson["tests"]
    # Verify that 'tests' is a two-dimensional list.  Using all() instead of
    # comparing len(filter(...)) keeps the check correct even when filter()
    # returns a lazy iterator (as it does on Python 3).
    if type(tests) is not list or not all(type(sublist) is list for sublist in tests):
        raise ParseError("'tests' should be a two-dimensional list of strings when '--user-defined-threads' is present.")
    # Verify that 'tests' sub-lists contain only strings.
    for sublist in tests:
        for test in sublist:
            if type(test) is not unicode:
                raise TypeError("Expected a unicode string but got %s. 'tests' should be a two-dimensional list of strings when '--user-defined-threads is present.'" % test)
    # Verify that the 'environments' key is not present.
    if "environments" in pjson:
        raise ParseError("'environments' list is not allowed when --user-defined-threads is present.")
|
def urlfetch_async(self, url, method='GET', headers=None, payload=None, deadline=None, callback=None, follow_redirects=False):
    """Make an async urlfetch() call.

    This is an async wrapper around urlfetch().  It adds the configured
    user agent and, when a token can be obtained, an OAuth authentication
    header before delegating to ndb's context urlfetch.

    Args:
      url: the url to fetch.
      method: the method in which to fetch.
      headers: the http headers.
      payload: the data to submit in the fetch.
      deadline: the deadline in which to make the call.
      callback: the call to make once completed.
      follow_redirects: whether or not to follow redirects.

    Yields:
      This returns a Future despite not being decorated with @ndb.tasklet!
    """
    # Copy the caller's headers so they are never mutated in place.
    headers = {} if headers is None else dict(headers)
    headers.update(self.user_agent)
    try:
        self.token = yield self.get_token_async()
    except app_identity.InternalError, e:
        # Token fetching fails inside "sandman"-based devel sandboxes;
        # proceed unauthenticated there, re-raise everywhere else.
        if os.environ.get('DATACENTER', '').endswith('sandman'):
            self.token = None
            logging.warning('Could not fetch an authentication token in sandman ' 'based Appengine devel setup; proceeding without one.')
        else:
            raise e
    if self.token:
        headers['authorization'] = 'OAuth ' + self.token
    # Fall back to the retry policy's timeout when no deadline was given.
    deadline = deadline or self.retry_params.urlfetch_timeout
    ctx = ndb.get_context()
    resp = yield ctx.urlfetch(url, payload=payload, method=method, headers=headers, follow_redirects=follow_redirects, deadline=deadline, callback=callback)
    raise ndb.Return(resp)
|
def _get_json(endpoint, params, referer='scores'):
    """Internal helper that performs a stats.nba.com request and returns JSON.

    Args:
        endpoint (str): endpoint to be called from the API
        params (dict): parameters to be passed to the API
        referer (str): path segment used to build the 'referer' header

    Raises:
        HTTPError: if the request finishes with a status code != 200

    Returns:
        json: decoded JSON body of the API response
    """
    request_headers = dict(HEADERS)
    request_headers['referer'] = 'http://stats.nba.com/{ref}/'.format(ref=referer)
    response = get(BASE_URL.format(endpoint=endpoint), params=params, headers=request_headers)
    # print response.url
    response.raise_for_status()
    return response.json()
|
def parse_cache_url(url):
    """Parses a cache URL into a Django-style cache configuration dict.

    Returns a dict with 'BACKEND', 'LOCATION' and (for most schemes)
    'KEY_PREFIX' keys; redis URLs may additionally get an 'OPTIONS' dict.
    """
    config = {}
    url = urlparse.urlparse(url)
    # Update with environment configuration.
    config['BACKEND'] = CACHE_SCHEMES[url.scheme]
    if url.scheme in ('file', 'uwsgi'):
        # File-based / uwsgi caches: the whole path is the location.
        config['LOCATION'] = url.path
        return config
    elif url.scheme in ('redis', 'hiredis'):
        if url.netloc == 'unix':
            # Unix-socket redis URL: the socket path, database number and
            # key prefix are all packed into url.path.
            location_index = None
            bits = list(filter(None, url.path.split('/')))
            # find the end of the socket path
            for index, bit in enumerate(bits, 1):
                if bit.endswith(('.sock', '.socket')):
                    location_index = index
                    break
            if location_index is None:
                # no socket file extension found, using the whole location
                # NOTE(review): in this branch 'database' and 'prefix' are
                # never bound, so the lines below would raise NameError /
                # TypeError -- confirm whether this path is reachable.
                location = bits
            else:
                # splitting socket path from database and prefix
                location = bits[:location_index]
                rest = bits[location_index:]
                if len(rest) > 0:
                    try:
                        # check if first item of the rest is a database
                        database = int(rest[0])
                        prefix = rest[1:]
                    except ValueError:
                        # or assume the rest is the prefix
                        database = 0
                        prefix = rest
                else:
                    database = prefix = None
            full_location = (url.netloc, '/' + '/'.join(location))
            if database is not None:
                full_location += (str(database),)
            config['LOCATION'] = ':'.join(full_location)
            config['KEY_PREFIX'] = '/'.join(prefix)
        else:
            # TCP redis URL: optional user:password@host:port/db/prefix.
            try:
                userpass, hostport = url.netloc.split('@')
            except ValueError:
                userpass, hostport = '', url.netloc
            try:
                username, password = userpass.split(':')
            except ValueError:
                # No credentials in the netloc; 'password' stays unbound and
                # is handled by the NameError guard below.
                pass
            path = list(filter(None, url.path.split('/')))
            config['LOCATION'] = ':'.join((hostport, path[0]))
            config['KEY_PREFIX'] = '/'.join(path[1:])
        redis_options = {}
        if url.scheme == 'hiredis':
            redis_options['PARSER_CLASS'] = 'redis.connection.HiredisParser'
        try:
            if password:
                redis_options['PASSWORD'] = password
        except NameError:
            # No password defined
            pass
        if redis_options:
            config['OPTIONS'] = redis_options
    else:
        # Generic host:port scheme (e.g. memcached); support a
        # comma-separated list of servers.
        netloc_list = url.netloc.split(',')
        if len(netloc_list) > 1:
            config['LOCATION'] = netloc_list
        else:
            config['LOCATION'] = url.netloc
        config['KEY_PREFIX'] = url.path[1:]
    return config
|
def annotate(g, fname, tables, feature_strand=False, in_memory=False, header=None, out=sys.stdout, _chrom=None, parallel=False):
    """annotate bed file in fname with tables.

    distances are integers for distance. and intron/exon/utr5 etc for
    gene-pred tables.  if the annotation features have a strand, the distance
    reported is negative if the annotation feature is upstream of the feature
    in question.  if feature_strand is True, the sign is taken relative to
    the query feature's strand instead.
    """
    close = False
    if isinstance(out, basestring):
        # Caller gave a path; open it and remember to close it at the end.
        out = nopen(out, "w")
        close = True
    if parallel:
        # Parallel mode: split the input per chromosome and recurse with
        # parallel=False in worker processes, merging results as they finish.
        import multiprocessing
        import signal
        p = multiprocessing.Pool(initializer=lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
        chroms = _split_chroms(fname)

        def write_result(fanno, written=[False]):
            # Callback run in the parent: stream one per-chromosome result
            # file to `out`, printing the header only once (mutable-default
            # trick keeps the flag across calls).
            for i, d in enumerate(reader(fanno, header="ordered")):
                if i == 0 and written[0] == False:
                    print >> out, "\t".join(d.keys())
                    written[0] = True
                print >> out, "\t".join(x if x else "NA" for x in d.values())
            os.unlink(fanno)
            os.unlink(fanno.replace(".anno", ""))

        for fchrom, (fout, fanno) in chroms:
            p.apply_async(annotate, args=(g.db, fout.name, tables, feature_strand, True, header, fanno, fchrom), callback=write_result)
        p.close()
        p.join()
        return out.name
    if isinstance(g, basestring):
        from . import Genome
        g = Genome(g)
    if in_memory:
        from .intersecter import Intersecter
        intersecters = []
        # 1 per table.
        for t in tables:
            q = getattr(g, t) if isinstance(t, basestring) else t
            if _chrom is not None:
                q = q.filter_by(chrom=_chrom)
            table_iter = q
            # page_query(q, g.session)
            intersecters.append(Intersecter(table_iter))
    elif isinstance(fname, basestring) and os.path.exists(fname) and sum(1 for _ in nopen(fname)) > 25000:
        print >> sys.stderr, "annotating many intervals, may be faster using in_memory=True"
    if header is None:
        header = []
    extra_header = []
    for j, toks in enumerate(reader(fname, header=False)):
        if j == 0 and not header:
            # First row: treat it as a header when start/end are not numeric.
            if not (toks[1] + toks[2]).isdigit():
                header = toks
        if j == 0:
            # Build the extra annotation columns ("<table>_<anno_col>").
            for t in tables:
                annos = (getattr(g, t) if isinstance(t, basestring) else t).first().anno_cols
                h = t if isinstance(t, basestring) else t._table.name if hasattr(t, "_table") else t.first()._table.name
                extra_header += ["%s_%s" % (h, a) for a in annos]
            if 0 != len(header):
                if not header[0].startswith("#"):
                    header[0] = "#" + header[0]
                print >> out, "\t".join(header + extra_header)
            if header == toks:
                continue
        if not isinstance(toks, ABase):
            # Plain BED row: wrap it in a Feature for distance calculations.
            f = Feature()
            f.chrom = toks[0]
            f.txStart = int(toks[1])
            f.txEnd = int(toks[2])
            try:
                f.strand = toks[header.index('strand')]
            except ValueError:
                pass
        else:
            f = toks
            # for now, use the objects str to get the columns
            # might want to use getattr on the original cols
            toks = f.bed(*header).split("\t")
        sep = "^*^"
        for ti, tbl in enumerate(tables):
            # k-nearest lookup either against the in-memory intersecter or
            # directly against the genome database.
            if in_memory:
                objs = intersecters[ti].knearest(int(toks[1]), int(toks[2]), chrom=toks[0], k=1)
            else:
                objs = g.knearest(tbl, toks[0], int(toks[1]), int(toks[2]), k=1)
            if len(objs) == 0:
                # NOTE(review): this prints a padded row, then continues the
                # *table* loop; the same toks may be printed again by the
                # final print below -- confirm intended.
                print >> out, "\t".join(toks + ["", "", ""])
                continue
            gp = hasattr(objs[0], "exonStarts")
            names = [o.gene_name for o in objs]
            if feature_strand:
                strands = [-1 if f.is_upstream_of(o) else 1 for o in objs]
            else:
                strands = [-1 if o.is_upstream_of(f) else 1 for o in objs]
            # dists can be a list of tuples where the 2nd item is something
            # like 'island' or 'shore'
            dists = [o.distance(f, features=gp) for o in objs]
            pure_dists = [d[0] if isinstance(d, (tuple, list)) else d for d in dists]
            # convert to negative if the feature is upstream of the query
            for i, s in enumerate(strands):
                if s == 1:
                    continue
                if isinstance(pure_dists[i], basestring):
                    continue
                pure_dists[i] *= -1
            for i, (pd, d) in enumerate(zip(pure_dists, dists)):
                if isinstance(d, tuple):
                    if len(d) > 1:
                        dists[i] = "%s%s%s" % (pd, sep, sep.join(d[1:]))
                else:
                    dists[i] = pd
            # keep uniqe name, dist combinations (occurs because of
            # transcripts)
            name_dists = set(["%s%s%s" % (n, sep, d) for (n, d) in zip(names, dists)])
            name_dists = [nd.split(sep) for nd in name_dists]
            # just take the first gene name if they are all the same
            if len(set(nd[0] for nd in name_dists)) == 1:
                toks.append(name_dists[0][0])
            else:
                toks.append(";".join(nd[0] for nd in name_dists))
            # iterate over the feat type, dist cols
            for i in range(1, len(name_dists[0])):
                toks.append(";".join(nd[i] for nd in name_dists))
        print >> out, "\t".join(toks)
    if close:
        out.close()
    return out.name
|
def new_dataset(data, identifier=None):
    """Initialize a new RT-DC dataset.

    Parameters
    ----------
    data:
        One of the following:

        - dict
        - .tdms file
        - .rtdc file
        - subclass of `RTDCBase` (will create a hierarchy child)
    identifier: str
        A unique identifier for this dataset. If set to `None`
        an identifier is generated.

    Returns
    -------
    dataset: subclass of :class:`dclab.rtdc_dataset.RTDCBase`
        A new dataset instance
    """
    if isinstance(data, dict):
        return fmt_dict.RTDC_Dict(data, identifier=identifier)
    if isinstance(data, (str_types, pathlib.Path)):
        return load_file(data, identifier=identifier)
    if isinstance(data, RTDCBase):
        return fmt_hierarchy.RTDC_Hierarchy(data, identifier=identifier)
    raise NotImplementedError("data type not supported: {}".format(data.__class__))
|
def main(argv=None):
    '''Command line entry point.

    Parses options, sets up the audio source, tokenizer and observer
    workers, then runs detection until the stream ends or the user
    interrupts.  Returns 0 on success, 2 on error.
    '''
    program_name = os.path.basename(sys.argv[0])
    program_version = version
    program_build_date = "%s" % __updated__
    program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)
    # program_usage = '''usage: spam two eggs'''  # optional - will be autogenerated by optparse
    program_longdesc = ''''''
    # optional - give further explanation about what the program does
    program_license = "Copyright 2015-2018 Mohamed El Amine SEHILI \
Licensed under the General Public License (GPL) Version 3 \nhttp://www.gnu.org/licenses/"
    if argv is None:
        argv = sys.argv[1:]
    try:
        # setup option parser
        parser = OptionParser(version=program_version_string, epilog=program_longdesc, description=program_license)
        # --- input/output options ---
        group = OptionGroup(parser, "[Input-Output options]")
        group.add_option("-i", "--input", dest="input", help="Input audio or video file. Use - for stdin [default: read from microphone using pyaudio]", metavar="FILE")
        group.add_option("-t", "--input-type", dest="input_type", help="Input audio file type. Mandatory if file name has no extension [default: %default]", type=str, default=None, metavar="String")
        group.add_option("-M", "--max_time", dest="max_time", help="Max data (in seconds) to read from microphone/file [default: read until the end of file/stream]", type=float, default=None, metavar="FLOAT")
        group.add_option("-O", "--output-main", dest="output_main", help="Save main stream as. If omitted main stream will not be saved [default: omitted]", type=str, default=None, metavar="FILE")
        group.add_option("-o", "--output-tokens", dest="output_tokens", help="Output file name format for detections. Use {N} and {start} and {end} to build file names, example: 'Det_{N}_{start}-{end}.wav'", type=str, default=None, metavar="STRING")
        group.add_option("-T", "--output-type", dest="output_type", help="Audio type used to save detections and/or main stream. If not supplied will: (1). guess from extension or (2). use wav format", type=str, default=None, metavar="STRING")
        group.add_option("-u", "--use-channel", dest="use_channel", help="Choose channel to use from a multi-channel audio file (requires pydub). 'left', 'right' and 'mix' are accepted values. [Default: 1 (i.e. 1st or left channel)]", type=str, default="1", metavar="STRING")
        parser.add_option_group(group)
        # --- tokenizer options ---
        group = OptionGroup(parser, "[Tokenization options]", "Set tokenizer options and energy threshold.")
        group.add_option("-a", "--analysis-window", dest="analysis_window", help="Size of analysis window in seconds [default: %default (10ms)]", type=float, default=0.01, metavar="FLOAT")
        group.add_option("-n", "--min-duration", dest="min_duration", help="Min duration of a valid audio event in seconds [default: %default]", type=float, default=0.2, metavar="FLOAT")
        group.add_option("-m", "--max-duration", dest="max_duration", help="Max duration of a valid audio event in seconds [default: %default]", type=float, default=5, metavar="FLOAT")
        group.add_option("-s", "--max-silence", dest="max_silence", help="Max duration of a consecutive silence within a valid audio event in seconds [default: %default]", type=float, default=0.3, metavar="FLOAT")
        group.add_option("-d", "--drop-trailing-silence", dest="drop_trailing_silence", help="Drop trailing silence from a detection [default: keep trailing silence]", action="store_true", default=False)
        group.add_option("-e", "--energy-threshold", dest="energy_threshold", help="Log energy threshold for detection [default: %default]", type=float, default=50, metavar="FLOAT")
        parser.add_option_group(group)
        # --- raw audio parameters (used for headerless input / microphone) ---
        group = OptionGroup(parser, "[Audio parameters]", "Define audio parameters if data is read from a headerless file (raw or stdin) or you want to use different microphone parameters.")
        group.add_option("-r", "--rate", dest="sampling_rate", help="Sampling rate of audio data [default: %default]", type=int, default=16000, metavar="INT")
        group.add_option("-c", "--channels", dest="channels", help="Number of channels of audio data [default: %default]", type=int, default=1, metavar="INT")
        group.add_option("-w", "--width", dest="sample_width", help="Number of bytes per audio sample [default: %default]", type=int, default=2, metavar="INT")
        group.add_option("-I", "--input-device-index", dest="input_device_index", help="Audio device index [default: %default] - only when using PyAudio", type=int, default=None, metavar="INT")
        group.add_option("-F", "--audio-frame-per-buffer", dest="frame_per_buffer", help="Audio frame per buffer [default: %default] - only when using PyAudio", type=int, default=1024, metavar="INT")
        parser.add_option_group(group)
        # --- actions to perform on each detection ---
        group = OptionGroup(parser, "[Do something with detections]", "Use these options to print, play or plot detections.")
        group.add_option("-C", "--command", dest="command", help="Command to call when an audio detection occurs. Use $ to represent the file name to use with the command (e.g. -C 'du -h $')", default=None, type=str, metavar="STRING")
        group.add_option("-E", "--echo", dest="echo", help="Play back each detection immediately using pyaudio [default: do not play]", action="store_true", default=False)
        group.add_option("-p", "--plot", dest="plot", help="Plot and show audio signal and detections (requires matplotlib)", action="store_true", default=False)
        group.add_option("", "--save-image", dest="save_image", help="Save plotted audio signal and detections as a picture or a PDF file (requires matplotlib)", type=str, default=None, metavar="FILE")
        group.add_option("", "--printf", dest="printf", help="print detections, one per line, using a user supplied format (e.g. '[{id}]: {start} -- {end}'). Available keywords {id}, {start}, {end} and {duration}", type=str, default="{id} {start} {end}", metavar="STRING")
        group.add_option("", "--time-format", dest="time_format", help="format used to print {start} and {end}. [Default= %default]. %S: absolute time in sec. %I: absolute time in ms. If at least one of (%h, %m, %s, %i) is used, convert time into hours, minutes, seconds and millis (e.g. %h:%m:%s.%i). Only required fields are printed", type=str, default="%S", metavar="STRING")
        parser.add_option_group(group)
        parser.add_option("-q", "--quiet", dest="quiet", help="Do not print any information about detections [default: print 'id', 'start' and 'end' of each detection]", action="store_true", default=False)
        parser.add_option("-D", "--debug", dest="debug", help="Print processing operations to STDOUT", action="store_true", default=False)
        parser.add_option("", "--debug-file", dest="debug_file", help="Print processing operations to FILE", type=str, default=None, metavar="FILE")
        # process options
        (opts, args) = parser.parse_args(argv)
        # Choose the audio source: stdin, file, or live microphone.
        if opts.input == "-":
            asource = StdinAudioSource(sampling_rate=opts.sampling_rate, sample_width=opts.sample_width, channels=opts.channels)
        # read data from a file
        elif opts.input is not None:
            asource = file_to_audio_source(filename=opts.input, filetype=opts.input_type, uc=opts.use_channel)
        # read data from microphone via pyaudio
        else:
            try:
                asource = PyAudioSource(sampling_rate=opts.sampling_rate, sample_width=opts.sample_width, channels=opts.channels, frames_per_buffer=opts.frame_per_buffer, input_device_index=opts.input_device_index)
            except Exception:
                sys.stderr.write("Cannot read data from audio device!\n")
                sys.stderr.write("You should either install pyaudio or read data from STDIN\n")
                sys.exit(2)
        # Configure logging: stdout handler (silenced unless --debug) plus an
        # optional file handler.
        logger = logging.getLogger(LOGGER_NAME)
        logger.setLevel(logging.DEBUG)
        handler = logging.StreamHandler(sys.stdout)
        if opts.quiet or not opts.debug:
            # only critical messages will be printed
            handler.setLevel(logging.CRITICAL)
        else:
            handler.setLevel(logging.DEBUG)
        logger.addHandler(handler)
        if opts.debug_file is not None:
            logger.setLevel(logging.DEBUG)
            opts.debug = True
            handler = logging.FileHandler(opts.debug_file, "w")
            fmt = logging.Formatter('[%(asctime)s] | %(message)s')
            handler.setFormatter(fmt)
            handler.setLevel(logging.DEBUG)
            logger.addHandler(handler)
        # Keep the whole stream in memory only when it is needed afterwards.
        record = opts.output_main is not None or opts.plot or opts.save_image is not None
        ads = ADSFactory.ads(audio_source=asource, block_dur=opts.analysis_window, max_time=opts.max_time, record=record)
        validator = AudioEnergyValidator(sample_width=asource.get_sample_width(), energy_threshold=opts.energy_threshold)
        if opts.drop_trailing_silence:
            mode = StreamTokenizer.DROP_TRAILING_SILENCE
        else:
            mode = 0
        # Durations are given in seconds; the tokenizer works in analysis
        # windows, hence the conversion factor.
        analysis_window_per_second = 1. / opts.analysis_window
        tokenizer = StreamTokenizer(validator=validator, min_length=opts.min_duration * analysis_window_per_second, max_length=int(opts.max_duration * analysis_window_per_second), max_continuous_silence=opts.max_silence * analysis_window_per_second, mode=mode)
        observers = []
        tokenizer_worker = None
        if opts.output_tokens is not None:
            try:
                # check user format is correct
                fname = opts.output_tokens.format(N=0, start=0, end=0)
                # find file type for detections
                tok_type = opts.output_type
                if tok_type is None:
                    tok_type = os.path.splitext(opts.output_tokens)[1][1:]
                if tok_type == "":
                    tok_type = "wav"
                token_saver = TokenSaverWorker(name_format=opts.output_tokens, filetype=tok_type, debug=opts.debug, logger=logger, sr=asource.get_sampling_rate(), sw=asource.get_sample_width(), ch=asource.get_channels())
                observers.append(token_saver)
            except Exception:
                sys.stderr.write("Wrong format for detections file name: '{0}'\n".format(opts.output_tokens))
                sys.exit(2)
        if opts.echo:
            try:
                player = player_for(asource)
                player_worker = PlayerWorker(player=player, debug=opts.debug, logger=logger)
                observers.append(player_worker)
            except Exception:
                sys.stderr.write("Cannot get an audio player!\n")
                sys.stderr.write("You should either install pyaudio or supply a command (-C option) to play audio\n")
                sys.exit(2)
        if opts.command is not None and len(opts.command) > 0:
            cmd_worker = CommandLineWorker(command=opts.command, debug=opts.debug, logger=logger)
            observers.append(cmd_worker)
        # NOTE(review): 'opts.plot is not None' is always True for a boolean
        # store_true option, so the log worker is always created -- confirm
        # whether 'opts.plot' (without 'is not None') was intended.
        if not opts.quiet or opts.plot is not None or opts.save_image is not None:
            oformat = opts.printf.replace("\\n", "\n").replace("\\t", "\t").replace("\\r", "\r")
            converter = seconds_to_str_fromatter(opts.time_format)
            log_worker = LogWorker(print_detections=not opts.quiet, output_format=oformat, time_formatter=converter, logger=logger, debug=opts.debug)
            observers.append(log_worker)
        tokenizer_worker = TokenizerWorker(ads, tokenizer, opts.analysis_window, observers)

        def _save_main_stream():
            # find file type
            main_type = opts.output_type
            if main_type is None:
                main_type = os.path.splitext(opts.output_main)[1][1:]
            if main_type == "":
                main_type = "wav"
            ads.close()
            ads.rewind()
            data = ads.get_audio_source().get_data_buffer()
            if len(data) > 0:
                save_audio_data(data=data, filename=opts.output_main, filetype=main_type, sr=asource.get_sampling_rate(), sw=asource.get_sample_width(), ch=asource.get_channels())

        def _plot():
            import numpy as np
            ads.close()
            ads.rewind()
            data = ads.get_audio_source().get_data_buffer()
            signal = AudioEnergyValidator._convert(data, asource.get_sample_width())
            detections = [(det[3], det[4]) for det in log_worker.detections]
            max_amplitude = 2 ** (asource.get_sample_width() * 8 - 1) - 1
            # Convert the log-energy threshold back to a normalized amplitude.
            energy_as_amp = np.sqrt(np.exp(opts.energy_threshold * np.log(10) / 10)) / max_amplitude
            plot_all(signal / max_amplitude, asource.get_sampling_rate(), energy_as_amp, detections, show=opts.plot, save_as=opts.save_image)

        # start observer threads
        for obs in observers:
            obs.start()
        # start tokenization thread
        tokenizer_worker.start()
        # Poll until every worker thread has finished.
        while True:
            time.sleep(1)
            if len(threading.enumerate()) == 1:
                break
        tokenizer_worker = None
        if opts.output_main is not None:
            _save_main_stream()
        if opts.plot or opts.save_image is not None:
            _plot()
        return 0
    except KeyboardInterrupt:
        # Graceful shutdown on Ctrl-C: stop workers, then still honor the
        # save/plot options before exiting successfully.
        if tokenizer_worker is not None:
            tokenizer_worker.stop()
        for obs in observers:
            obs.stop()
        if opts.output_main is not None:
            _save_main_stream()
        if opts.plot or opts.save_image is not None:
            _plot()
        return 0
    except Exception as e:
        sys.stderr.write(program_name + ": " + str(e) + "\n")
        sys.stderr.write("for help use -h\n")
        return 2
|
def _norm ( self , x ) :
"""Return the norm of ` ` x ` ` .
This method is intended to be private . Public callers should
resort to ` norm ` which is type - checked ."""
|
return float ( np . sqrt ( self . inner ( x , x ) . real ) )
|
def keys(self):
    """Return ids of all indexed documents (fresh index first)."""
    doc_ids = []
    for index in (self.fresh_index, self.opt_index):
        if index is not None:
            doc_ids.extend(index.keys())
    return doc_ids
|
def get_directory_nodes(self, path):
    """Return the :class:`umbra.components.factory.script_editor.nodes.DirectoryNode` class Nodes with given path.

    :param path: Directory path.
    :type path: unicode
    :return: DirectoryNode nodes.
    :rtype: list
    """
    matching_nodes = []
    for node in self.list_directory_nodes():
        if node.path == path:
            matching_nodes.append(node)
    return matching_nodes
|
def wrap(self, message):
    """Sign and/or seal *message* according to the negotiated NTLM flags.

    [MS-NLMP] v28.0 2016-07-14, 3.4.6 GSS_WrapEx():
    Emulates the GSS_Wrap() implementation to sign and seal messages if
    the correct flags are set.

    @param message: The message data that will be wrapped
    @return message: The message that has been sealed if flags are set
    @return signature: The signature of the message, None if flags are not set
    """
    if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SEAL:
        # NOTE: sealing is performed before computing the signature; both
        # calls may advance internal cipher state, so the order is preserved.
        sealed_message = self._seal_message(message)
        signature = self._get_signature(message)
        return sealed_message, signature
    if self.negotiate_flags & NegotiateFlags.NTLMSSP_NEGOTIATE_SIGN:
        return message, self._get_signature(message)
    return message, None
|
def get_email_logs(self):
    '''Return a string representation of the recorded logs.

    Only errors and warnings are included in the email logs
    to avoid being verbose.
    '''
    parts = []
    for entry in self.record:
        if entry["log_type"] in (ERROR, WARNING):
            parts.append(self.format_message(**entry))
    return "".join(parts)
|
def packages(self):
    """Return all installed packages as ``Package(name, path)`` records.

    Parses the output of ``pm list packages -f``; lines that do not match
    the expected ``package:<path>=<name>`` format are skipped.
    """
    line_re = re.compile(r'package:(/[^=]+\.apk)=([^\s]+)')
    installed = []
    for line in self.shell('pm', 'list', 'packages', '-f').splitlines():
        match = line_re.match(line)
        if match is not None:
            installed.append(self.Package(match.group(2), match.group(1)))
    return installed
|
def _loglr(self):
    r"""Computes the log likelihood ratio,

    .. math::

        \log \mathcal{L}(\Theta) =
            I_0\left(\left|\sum_i O(h^0_i, d_i)\right|\right) -
            \frac{1}{2}\left<h^0_i, h^0_i\right>,

    at the current point in parameter space :math:`\Theta`.

    Returns
    -------
    float
        The value of the log likelihood ratio evaluated at the given point.
    """
    params = self.current_params
    try:
        wfs = self._waveform_generator.generate(**params)
    except NoWaveformError:
        # No waveform can be generated at this point; delegate to the
        # model's dedicated fallback value.
        return self._nowaveform_loglr()
    hh = 0.
    hd = 0j
    for det, h in wfs.items():
        # the kmax of the waveforms may be different than internal kmax
        kmax = min(len(h), self._kmax)
        if self._kmin >= kmax:
            # if the waveform terminates before the filtering low frequency
            # cutoff, then the loglr is just 0 for this detector
            hh_i = 0.
            hd_i = 0j
        else:
            # whiten the waveform
            h[self._kmin:kmax] *= self._weight[det][self._kmin:kmax]
            # calculate inner products
            hh_i = h[self._kmin:kmax].inner(h[self._kmin:kmax]).real
            hd_i = self.data[det][self._kmin:kmax].inner(h[self._kmin:kmax])
        # store
        setattr(self._current_stats, '{}_optimal_snrsq'.format(det), hh_i)
        hh += hh_i
        hd += hd_i
    hd = abs(hd)
    # NOTE(review): hd has already been passed through abs(), so
    # numpy.angle(hd) is always 0 here -- confirm whether the phase was
    # meant to be taken before the abs().
    self._current_stats.maxl_phase = numpy.angle(hd)
    # i0e is the exponentially-scaled Bessel function, hence the "+ hd" to
    # undo the scaling in log space.
    return numpy.log(special.i0e(hd)) + hd - 0.5 * hh
|
def select(self, table, fields=None, where=None, orderby=None, limit=None, offset=None):
    """Query *table* and return the matching records as a list.

    :param table: name of the table to query
    :param fields: list of column names to select (defaults to ``['*']``)
    :param where: optional WHERE specification forwarded to ``sqlselect``
    :param orderby: optional ORDER BY specification
    :param limit: optional LIMIT value
    :param offset: optional OFFSET value
    :return: list of fetched records

    Example::

        s.insert('t2', {'id': 1, 'name': 'Toto'})
        rows = s.select('t2')
        # rows == [{'id': 1, 'name': 'Toto'}]
    """
    # Use a None sentinel instead of a mutable list default argument.
    if fields is None:
        fields = ['*']
    (sql, values) = sqlselect(table, fields, where, orderby, limit, offset)
    self.execute(sql, values)
    return self.fetchall()
|
def cleanup(self, cluster):
    """Delete the inventory file that was generated for *cluster*.

    :param cluster: cluster to clear up inventory file for
    :type cluster: :py:class:`elasticluster.cluster.Cluster`
    """
    if not self._storage_path or not os.path.exists(self._storage_path):
        return
    fname = '%s.%s' % (AnsibleSetupProvider.inventory_file_ending, cluster.name)
    inventory_path = os.path.join(self._storage_path, fname)
    if not os.path.exists(inventory_path):
        return
    try:
        os.unlink(inventory_path)
        # If the storage directory was only temporary and is now empty,
        # remove it as well.
        if self._storage_path_tmp and not os.listdir(self._storage_path):
            shutil.rmtree(self._storage_path)
    except OSError as ex:
        log.warning("AnsibileProvider: Ignoring error while deleting inventory file %s: %s", inventory_path, ex)
|
def select(self, axis: AxisIdentifier, index, force_copy: bool = False) -> HistogramBase:
    """Select a bin (or bin range) along one axis.

    Parameters
    ----------
    axis: int or str
        Axis, in which we select.
    index: int or slice
        Index of bin (as in numpy).
    force_copy: bool
        If True, identity slice force a copy to be made.
    """
    if index == slice(None) and not force_copy:
        return self
    axis_id = self._get_axis(axis)
    selector = [slice(None)] * self.ndim
    selector[axis_id] = index
    frequencies = self._frequencies[tuple(selector)].copy()
    errors2 = self._errors2[tuple(selector)].copy()
    if isinstance(index, int):
        # Selecting a single bin removes the axis entirely.
        kept_axes = [ax for ax in range(self.ndim) if ax != axis_id]
        return self._reduce_dimension(kept_axes, frequencies, errors2)
    if isinstance(index, slice):
        if index.step is not None and index.step < 0:
            raise IndexError("Cannot change the order of bins")
        result = self.copy()
        result._frequencies = frequencies
        result._errors2 = errors2
        result._binnings[axis_id] = self._binnings[axis_id][index]
        return result
    raise ValueError("Invalid index.")
|
def build(self, builder):
    """Append this label definition to *builder* as an mdsol:LabelDef element."""
    attrs = dict(OID=self.oid, Name=self.name)
    if self.field_number is not None:
        # FieldNumber is serialized as a string attribute.
        attrs["FieldNumber"] = str(self.field_number)
    builder.start("mdsol:LabelDef", attrs)
    for child in self.translations:
        child.build(builder)
    for restriction in self.view_restrictions:
        restriction.build(builder)
    builder.end("mdsol:LabelDef")
|
def type_check ( func_handle ) :
"""Ensure arguments have the type specified in the annotation signature .
Example : :
def foo ( a , b : str , c : int = 0 , d : ( int , list ) = None ) :
pass
This function accepts an arbitrary parameter for ` ` a ` ` , a string
for ` ` b ` ` , an integer for ` ` c ` ` which defaults to 0 , and either
an integer or a list for ` ` d ` ` and defaults to ` ` None ` ` .
The decorator does not check return types and considers derived
classes as valid ( ie . the type check uses the Python native
` ` isinstance ` ` to do its job ) . For instance , if the function is
defined as : :
@ type _ check
def foo ( a : QtGui . QWidget ) :
pass
then the following two calls will both succeed : :
foo ( QtGui . QWidget ( ) )
foo ( QtGui . QTextEdit ( ) )
because ` ` QTextEdit ` ` inherits ` ` QWidget ` ` .
. . note : : the check is skipped if the value ( either passed or by
default ) is * * None * * .
| Raises |
* * * QtmacsArgumentError * * if at least one argument has an invalid type ."""
|
def checkType ( var_name , var_val , annot ) : # Retrieve the annotation for this variable and determine
# if the type of that variable matches with the annotation .
# This annotation is stored in the dictionary ` ` annot ` `
# but contains only variables for such an annotation exists ,
# hence the if / else branch .
if var_name in annot : # Fetch the type - annotation of the variable .
var_anno = annot [ var_name ]
# Skip the type check if the variable is none , otherwise
# check if it is a derived class . The only exception from
# the latter rule are binary values , because in Python
# > > isinstance ( False , int )
# True
# and warrants a special check .
if var_val is None :
type_ok = True
elif ( type ( var_val ) is bool ) :
type_ok = ( type ( var_val ) in var_anno )
else :
type_ok = True in [ isinstance ( var_val , _ ) for _ in var_anno ]
else : # Variable without annotation are compatible by assumption .
var_anno = 'Unspecified'
type_ok = True
# If the check failed then raise a QtmacsArgumentError .
if not type_ok :
args = ( var_name , func_handle . __name__ , var_anno , type ( var_val ) )
raise QtmacsArgumentError ( * args )
@ functools . wraps ( func_handle )
def wrapper ( * args , ** kwds ) : # Retrieve information about all arguments passed to the function ,
# as well as their annotations in the function signature .
argspec = inspect . getfullargspec ( func_handle )
# Convert all variable annotations that were not specified as a
# tuple or list into one , eg . str - - > will become ( str , )
annot = { }
for key , val in argspec . annotations . items ( ) :
if isinstance ( val , tuple ) or isinstance ( val , list ) :
annot [ key ] = val
else :
annot [ key ] = val ,
# Note the trailing colon !
# Prefix the argspec . defaults tuple with * * None * * elements to make
# its length equal to the number of variables ( for sanity in the
# code below ) . Since * * None * * types are always ignored by this
# decorator this change is neutral .
if argspec . defaults is None :
defaults = tuple ( [ None ] * len ( argspec . args ) )
else :
num_none = len ( argspec . args ) - len ( argspec . defaults )
defaults = tuple ( [ None ] * num_none ) + argspec . defaults
# Shorthand for the number of unnamed arguments .
ofs = len ( args )
# Process the unnamed arguments . These are always the first ` ` ofs ` `
# elements in argspec . args .
for idx , var_name in enumerate ( argspec . args [ : ofs ] ) : # Look up the value in the ` ` args ` ` variable .
var_val = args [ idx ]
checkType ( var_name , var_val , annot )
# Process the named - and default arguments .
for idx , var_name in enumerate ( argspec . args [ ofs : ] ) : # Extract the argument value . If it was passed to the
# function as a named ( ie . keyword ) argument then extract
# it from ` ` kwds ` ` , otherwise look it up in the tuple with
# the default values .
if var_name in kwds :
var_val = kwds [ var_name ]
else :
var_val = defaults [ idx + ofs ]
checkType ( var_name , var_val , annot )
return func_handle ( * args , ** kwds )
return wrapper
|
def get_min_max_bounds ( self ) :
"""Return a dict of min - and max - values for the given column .
This is required to estimate the bounds of images ."""
|
bound = Bound ( 999999.0 , 0.0 )
for bp in Breakpoint :
bound . extend ( self . get_bound ( bp ) )
return { 'min' : bound . min , 'max' : bound . max }
|
def _exception_gather_guard ( self , fn ) :
"""A higher order function to trap UserExceptions and then log them .
This is to present nicer output to the user when failures are
occuring in another thread of execution that may not end up at
the catch - all try / except in main ( ) ."""
|
@ functools . wraps ( fn )
def wrapper ( * args , ** kwargs ) :
try :
return fn ( * args , ** kwargs )
except UserException as e :
self . exceptions . append ( e )
return wrapper
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.